Diffstat (limited to 'arch/arm/mm/init.c')
-rw-r--r--   arch/arm/mm/init.c | 895
1 file changed, 435 insertions(+), 460 deletions(-)
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index edffa47a4b2..659c75d808d 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -1,66 +1,113 @@
 /*
  *  linux/arch/arm/mm/init.c
  *
- *  Copyright (C) 1995-2002 Russell King
+ *  Copyright (C) 1995-2005 Russell King
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
  */
-#include <linux/config.h>
 #include <linux/kernel.h>
 #include <linux/errno.h>
-#include <linux/ptrace.h>
 #include <linux/swap.h>
 #include <linux/init.h>
 #include <linux/bootmem.h>
 #include <linux/mman.h>
+#include <linux/export.h>
 #include <linux/nodemask.h>
 #include <linux/initrd.h>
-
+#include <linux/of_fdt.h>
+#include <linux/highmem.h>
+#include <linux/gfp.h>
+#include <linux/memblock.h>
+#include <linux/dma-contiguous.h>
+#include <linux/sizes.h>
+
+#include <asm/cp15.h>
 #include <asm/mach-types.h>
-#include <asm/hardware.h>
+#include <asm/memblock.h>
+#include <asm/prom.h>
+#include <asm/sections.h>
 #include <asm/setup.h>
 #include <asm/tlb.h>
+#include <asm/fixmap.h>
 
 #include <asm/mach/arch.h>
 #include <asm/mach/map.h>
 
-#define TABLE_SIZE	(2 * PTRS_PER_PTE * sizeof(pte_t))
+#include "mm.h"
+
+#ifdef CONFIG_CPU_CP15_MMU
+unsigned long __init __clear_cr(unsigned long mask)
+{
+	cr_alignment = cr_alignment & ~mask;
+	return cr_alignment;
+}
+#endif
 
-DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
+static phys_addr_t phys_initrd_start __initdata = 0;
+static unsigned long phys_initrd_size __initdata = 0;
 
-extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
-extern void _stext, _text, _etext, __data_start, _end, __init_begin, __init_end;
-extern unsigned long phys_initrd_start;
-extern unsigned long phys_initrd_size;
+static int __init early_initrd(char *p)
+{
+	phys_addr_t start;
+	unsigned long size;
+	char *endp;
 
-/*
- * The sole use of this is to pass memory configuration
- * data from paging_init to mem_init.
- */
-static struct meminfo meminfo __initdata = { 0, };
+	start = memparse(p, &endp);
+	if (*endp == ',') {
+		size = memparse(endp + 1, NULL);
+
+		phys_initrd_start = start;
+		phys_initrd_size = size;
+	}
+	return 0;
+}
+early_param("initrd", early_initrd);
+
+static int __init parse_tag_initrd(const struct tag *tag)
+{
+	printk(KERN_WARNING "ATAG_INITRD is deprecated; "
+		"please update your bootloader.\n");
+	phys_initrd_start = __virt_to_phys(tag->u.initrd.start);
+	phys_initrd_size = tag->u.initrd.size;
+	return 0;
+}
+
+__tagtable(ATAG_INITRD, parse_tag_initrd);
+
+static int __init parse_tag_initrd2(const struct tag *tag)
+{
+	phys_initrd_start = tag->u.initrd.start;
+	phys_initrd_size = tag->u.initrd.size;
+	return 0;
+}
+
+__tagtable(ATAG_INITRD2, parse_tag_initrd2);
 
 /*
- * empty_zero_page is a special page that is used for
- * zero-initialized data and COW.
+ * This keeps memory configuration data used by a couple memory
+ * initialization functions, as well as show_mem() for the skipping
+ * of holes in the memory map. It is populated by arm_add_memory().
  */
-struct page *empty_zero_page;
-
-void show_mem(void)
+void show_mem(unsigned int filter)
 {
 	int free = 0, total = 0, reserved = 0;
-	int shared = 0, cached = 0, slab = 0, node;
+	int shared = 0, cached = 0, slab = 0;
+	struct memblock_region *reg;
 
 	printk("Mem-info:\n");
-	show_free_areas();
-	printk("Free swap:       %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
+	show_free_areas(filter);
 
-	for_each_online_node(node) {
+	for_each_memblock (memory, reg) {
+		unsigned int pfn1, pfn2;
 		struct page *page, *end;
 
-		page = NODE_MEM_MAP(node);
-		end  = page + NODE_DATA(node)->node_spanned_pages;
+		pfn1 = memblock_region_memory_base_pfn(reg);
+		pfn2 = memblock_region_memory_end_pfn(reg);
+
+		page = pfn_to_page(pfn1);
+		end = pfn_to_page(pfn2 - 1) + 1;
 
 		do {
 			total++;
@@ -74,8 +121,9 @@ void show_mem(void)
 				free++;
 			else
 				shared += page_count(page) - 1;
-			page++;
-		} while (page < end);
+			pfn1++;
+			page = pfn_to_page(pfn1);
+		} while (pfn1 < pfn2);
 	}
 
 	printk("%d pages of RAM\n", total);
@@ -86,453 +134,259 @@ void show_mem(void)
 	printk("%d pages swap cached\n", cached);
 }
 
-struct node_info {
-	unsigned int start;
-	unsigned int end;
-	int bootmap_pages;
-};
-
-#define O_PFN_DOWN(x)	((x) >> PAGE_SHIFT)
-#define O_PFN_UP(x)	(PAGE_ALIGN(x) >> PAGE_SHIFT)
-
-/*
- * FIXME: We really want to avoid allocating the bootmap bitmap
- * over the top of the initrd.  Hopefully, this is located towards
- * the start of a bank, so if we allocate the bootmap bitmap at
- * the end, we won't clash.
- */
-static unsigned int __init
-find_bootmap_pfn(int node, struct meminfo *mi, unsigned int bootmap_pages)
+static void __init find_limits(unsigned long *min, unsigned long *max_low,
+			       unsigned long *max_high)
 {
-	unsigned int start_pfn, bank, bootmap_pfn;
-
-	start_pfn   = O_PFN_UP(__pa(&_end));
-	bootmap_pfn = 0;
-
-	for (bank = 0; bank < mi->nr_banks; bank ++) {
-		unsigned int start, end;
-
-		if (mi->bank[bank].node != node)
-			continue;
-
-		start = mi->bank[bank].start >> PAGE_SHIFT;
-		end   = (mi->bank[bank].size +
-			 mi->bank[bank].start) >> PAGE_SHIFT;
-
-		if (end < start_pfn)
-			continue;
-
-		if (start < start_pfn)
-			start = start_pfn;
-
-		if (end <= start)
-			continue;
-
-		if (end - start >= bootmap_pages) {
-			bootmap_pfn = start;
-			break;
-		}
-	}
+	*max_low = PFN_DOWN(memblock_get_current_limit());
+	*min = PFN_UP(memblock_start_of_DRAM());
+	*max_high = PFN_DOWN(memblock_end_of_DRAM());
+}
 
-	if (bootmap_pfn == 0)
-		BUG();
+#ifdef CONFIG_ZONE_DMA
 
-	return bootmap_pfn;
-}
+phys_addr_t arm_dma_zone_size __read_mostly;
+EXPORT_SYMBOL(arm_dma_zone_size);
 
 /*
- * Scan the memory info structure and pull out:
- *  - the end of memory
- *  - the number of nodes
- *  - the pfn range of each node
- *  - the number of bootmem bitmap pages
+ * The DMA mask corresponding to the maximum bus address allocatable
+ * using GFP_DMA. The default here places no restriction on DMA
+ * allocations. This must be the smallest DMA mask in the system,
+ * so a successful GFP_DMA allocation will always satisfy this.
  */
-static unsigned int __init
-find_memend_and_nodes(struct meminfo *mi, struct node_info *np)
-{
-	unsigned int i, bootmem_pages = 0, memend_pfn = 0;
-
-	for (i = 0; i < MAX_NUMNODES; i++) {
-		np[i].start = -1U;
-		np[i].end = 0;
-		np[i].bootmap_pages = 0;
-	}
-
-	for (i = 0; i < mi->nr_banks; i++) {
-		unsigned long start, end;
-		int node;
-
-		if (mi->bank[i].size == 0) {
-			/*
-			 * Mark this bank with an invalid node number
-			 */
-			mi->bank[i].node = -1;
-			continue;
-		}
-
-		node = mi->bank[i].node;
-
-		/*
-		 * Make sure we haven't exceeded the maximum number of nodes
-		 * that we have in this configuration.  If we have, we're in
-		 * trouble.  (maybe we ought to limit, instead of bugging?)
-		 */
-		if (node >= MAX_NUMNODES)
-			BUG();
-		node_set_online(node);
-
-		/*
-		 * Get the start and end pfns for this bank
-		 */
-		start = mi->bank[i].start >> PAGE_SHIFT;
-		end   = (mi->bank[i].start + mi->bank[i].size) >> PAGE_SHIFT;
-
-		if (np[node].start > start)
-			np[node].start = start;
-
-		if (np[node].end < end)
-			np[node].end = end;
-
-		if (memend_pfn < end)
-			memend_pfn = end;
-	}
+phys_addr_t arm_dma_limit;
+unsigned long arm_dma_pfn_limit;
 
-	/*
-	 * Calculate the number of pages we require to
-	 * store the bootmem bitmaps.
-	 */
-	for_each_online_node(i) {
-		if (np[i].end == 0)
-			continue;
-
-		np[i].bootmap_pages = bootmem_bootmap_pages(np[i].end -
-							    np[i].start);
-		bootmem_pages += np[i].bootmap_pages;
-	}
-
-	high_memory = __va(memend_pfn << PAGE_SHIFT);
-
-	/*
-	 * This doesn't seem to be used by the Linux memory
-	 * manager any more.  If we can get rid of it, we
-	 * also get rid of some of the stuff above as well.
-	 *
-	 * Note: max_low_pfn and max_pfn reflect the number
-	 * of _pages_ in the system, not the maximum PFN.
-	 */
-	max_low_pfn = memend_pfn - O_PFN_DOWN(PHYS_OFFSET);
-	max_pfn = memend_pfn - O_PFN_DOWN(PHYS_OFFSET);
+static void __init arm_adjust_dma_zone(unsigned long *size, unsigned long *hole,
+	unsigned long dma_size)
+{
+	if (size[0] <= dma_size)
+		return;
 
-	return bootmem_pages;
+	size[ZONE_NORMAL] = size[0] - dma_size;
+	size[ZONE_DMA] = dma_size;
+	hole[ZONE_NORMAL] = hole[0];
+	hole[ZONE_DMA] = 0;
 }
+#endif
 
-static int __init check_initrd(struct meminfo *mi)
+void __init setup_dma_zone(const struct machine_desc *mdesc)
 {
-	int initrd_node = -2;
-#ifdef CONFIG_BLK_DEV_INITRD
-	unsigned long end = phys_initrd_start + phys_initrd_size;
-
-	/*
-	 * Make sure that the initrd is within a valid area of
-	 * memory.
-	 */
-	if (phys_initrd_size) {
-		unsigned int i;
-
-		initrd_node = -1;
-
-		for (i = 0; i < mi->nr_banks; i++) {
-			unsigned long bank_end;
-
-			bank_end = mi->bank[i].start + mi->bank[i].size;
-
-			if (mi->bank[i].start <= phys_initrd_start &&
-			    end <= bank_end)
-				initrd_node = mi->bank[i].node;
-		}
-	}
-
-	if (initrd_node == -1) {
-		printk(KERN_ERR "initrd (0x%08lx - 0x%08lx) extends beyond "
-		       "physical memory - disabling initrd\n",
-		       phys_initrd_start, end);
-		phys_initrd_start = phys_initrd_size = 0;
-	}
+#ifdef CONFIG_ZONE_DMA
+	if (mdesc->dma_zone_size) {
+		arm_dma_zone_size = mdesc->dma_zone_size;
+		arm_dma_limit = PHYS_OFFSET + arm_dma_zone_size - 1;
+	} else
+		arm_dma_limit = 0xffffffff;
+	arm_dma_pfn_limit = arm_dma_limit >> PAGE_SHIFT;
 #endif
-
-	return initrd_node;
 }
 
-/*
- * Reserve the various regions of node 0
- */
-static __init void reserve_node_zero(unsigned int bootmap_pfn, unsigned int bootmap_pages)
+static void __init zone_sizes_init(unsigned long min, unsigned long max_low,
+	unsigned long max_high)
 {
-	pg_data_t *pgdat = NODE_DATA(0);
-	unsigned long res_size = 0;
+	unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES];
+	struct memblock_region *reg;
 
 	/*
-	 * Register the kernel text and data with bootmem.
-	 * Note that this can only be in node 0.
+	 * initialise the zones.
 	 */
-#ifdef CONFIG_XIP_KERNEL
-	reserve_bootmem_node(pgdat, __pa(&__data_start), &_end - &__data_start);
-#else
-	reserve_bootmem_node(pgdat, __pa(&_stext), &_end - &_stext);
-#endif
-
-	/*
-	 * Reserve the page tables.  These are already in use,
-	 * and can only be in node 0.
-	 */
-	reserve_bootmem_node(pgdat, __pa(swapper_pg_dir),
-			     PTRS_PER_PGD * sizeof(pgd_t));
+	memset(zone_size, 0, sizeof(zone_size));
 
 	/*
-	 * And don't forget to reserve the allocator bitmap,
-	 * which will be freed later.
+	 * The memory size has already been determined.  If we need
+	 * to do anything fancy with the allocation of this memory
+	 * to the zones, now is the time to do it.
 	 */
-	reserve_bootmem_node(pgdat, bootmap_pfn << PAGE_SHIFT,
-			     bootmap_pages << PAGE_SHIFT);
-
-	/*
-	 * Hmm... This should go elsewhere, but we really really need to
-	 * stop things allocating the low memory; ideally we need a better
-	 * implementation of GFP_DMA which does not assume that DMA-able
-	 * memory starts at zero.
-	 */
-	if (machine_is_integrator() || machine_is_cintegrator())
-		res_size = __pa(swapper_pg_dir) - PHYS_OFFSET;
+	zone_size[0] = max_low - min;
+#ifdef CONFIG_HIGHMEM
+	zone_size[ZONE_HIGHMEM] = max_high - max_low;
+#endif
 
 	/*
-	 * These should likewise go elsewhere.  They pre-reserve the
-	 * screen memory region at the start of main system memory.
+	 * Calculate the size of the holes.
+	 *  holes = node_size - sum(bank_sizes)
 	 */
-	if (machine_is_edb7211())
-		res_size = 0x00020000;
-	if (machine_is_p720t())
-		res_size = 0x00014000;
+	memcpy(zhole_size, zone_size, sizeof(zhole_size));
+	for_each_memblock(memory, reg) {
+		unsigned long start = memblock_region_memory_base_pfn(reg);
+		unsigned long end = memblock_region_memory_end_pfn(reg);
+
+		if (start < max_low) {
+			unsigned long low_end = min(end, max_low);
+			zhole_size[0] -= low_end - start;
+		}
+#ifdef CONFIG_HIGHMEM
+		if (end > max_low) {
+			unsigned long high_start = max(start, max_low);
+			zhole_size[ZONE_HIGHMEM] -= end - high_start;
+		}
+#endif
+	}
 
-#ifdef CONFIG_SA1111
+#ifdef CONFIG_ZONE_DMA
 	/*
-	 * Because of the SA1111 DMA bug, we want to preserve our
-	 * precious DMA-able memory...
+	 * Adjust the sizes according to any special requirements for
+	 * this machine type.
 	 */
-	res_size = __pa(swapper_pg_dir) - PHYS_OFFSET;
+	if (arm_dma_zone_size)
+		arm_adjust_dma_zone(zone_size, zhole_size,
+			arm_dma_zone_size >> PAGE_SHIFT);
 #endif
 
-	if (res_size)
-		reserve_bootmem_node(pgdat, PHYS_OFFSET, res_size);
+	free_area_init_node(0, zone_size, min, zhole_size);
 }
 
-/*
- * Register all available RAM in this node with the bootmem allocator.
- */
-static inline void free_bootmem_node_bank(int node, struct meminfo *mi)
+#ifdef CONFIG_HAVE_ARCH_PFN_VALID
+int pfn_valid(unsigned long pfn)
 {
-	pg_data_t *pgdat = NODE_DATA(node);
-	int bank;
-
-	for (bank = 0; bank < mi->nr_banks; bank++)
-		if (mi->bank[bank].node == node)
-			free_bootmem_node(pgdat, mi->bank[bank].start,
-					  mi->bank[bank].size);
+	return memblock_is_memory(__pfn_to_phys(pfn));
 }
+EXPORT_SYMBOL(pfn_valid);
+#endif
 
-/*
- * Initialise the bootmem allocator for all nodes.  This is called
- * early during the architecture specific initialisation.
- */
-static void __init bootmem_init(struct meminfo *mi)
+#ifndef CONFIG_SPARSEMEM
+static void __init arm_memory_present(void)
 {
-	struct node_info node_info[MAX_NUMNODES], *np = node_info;
-	unsigned int bootmap_pages, bootmap_pfn, map_pg;
-	int node, initrd_node;
+}
+#else
+static void __init arm_memory_present(void)
+{
+	struct memblock_region *reg;
 
-	bootmap_pages = find_memend_and_nodes(mi, np);
-	bootmap_pfn   = find_bootmap_pfn(0, mi, bootmap_pages);
-	initrd_node   = check_initrd(mi);
+	for_each_memblock(memory, reg)
+		memory_present(0, memblock_region_memory_base_pfn(reg),
+			       memblock_region_memory_end_pfn(reg));
+}
+#endif
 
-	map_pg = bootmap_pfn;
+static bool arm_memblock_steal_permitted = true;
 
-	/*
-	 * Initialise the bootmem nodes.
-	 *
-	 * What we really want to do is:
-	 *
-	 *   unmap_all_regions_except_kernel();
-	 *   for_each_node_in_reverse_order(node) {
-	 *     map_node(node);
-	 *     allocate_bootmem_map(node);
-	 *     init_bootmem_node(node);
-	 *     free_bootmem_node(node);
-	 *   }
-	 *
-	 * but this is a 2.5-type change.  For now, we just set
-	 * the nodes up in reverse order.
-	 *
-	 * (we could also do with rolling bootmem_init and paging_init
-	 * into one generic "memory_init" type function).
-	 */
-	np += num_online_nodes() - 1;
-	for (node = num_online_nodes() - 1; node >= 0; node--, np--) {
-		/*
-		 * If there are no pages in this node, ignore it.
-		 * Note that node 0 must always have some pages.
-		 */
-		if (np->end == 0 || !node_online(node)) {
-			if (node == 0)
-				BUG();
-			continue;
-		}
+phys_addr_t __init arm_memblock_steal(phys_addr_t size, phys_addr_t align)
+{
+	phys_addr_t phys;
 
-		/*
-		 * Initialise the bootmem allocator.
-		 */
-		init_bootmem_node(NODE_DATA(node), map_pg, np->start, np->end);
-		free_bootmem_node_bank(node, mi);
-		map_pg += np->bootmap_pages;
+	BUG_ON(!arm_memblock_steal_permitted);
 
-		/*
-		 * If this is node 0, we need to reserve some areas ASAP -
-		 * we may use bootmem on node 0 to setup the other nodes.
-		 */
-		if (node == 0)
-			reserve_node_zero(bootmap_pfn, bootmap_pages);
-	}
+	phys = memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ANYWHERE);
+	memblock_free(phys, size);
+	memblock_remove(phys, size);
+
+	return phys;
+}
 
+void __init arm_memblock_init(const struct machine_desc *mdesc)
+{
+	/* Register the kernel text, kernel data and initrd with memblock.
+	 */
+#ifdef CONFIG_XIP_KERNEL
+	memblock_reserve(__pa(_sdata), _end - _sdata);
+#else
+	memblock_reserve(__pa(_stext), _end - _stext);
+#endif
+
 #ifdef CONFIG_BLK_DEV_INITRD
-	if (phys_initrd_size && initrd_node >= 0) {
-		reserve_bootmem_node(NODE_DATA(initrd_node), phys_initrd_start,
-				     phys_initrd_size);
+	/* FDT scan will populate initrd_start */
+	if (initrd_start && !phys_initrd_size) {
+		phys_initrd_start = __virt_to_phys(initrd_start);
+		phys_initrd_size = initrd_end - initrd_start;
+	}
+	initrd_start = initrd_end = 0;
+	if (phys_initrd_size &&
+	    !memblock_is_region_memory(phys_initrd_start, phys_initrd_size)) {
+		pr_err("INITRD: 0x%08llx+0x%08lx is not a memory region - disabling initrd\n",
+		       (u64)phys_initrd_start, phys_initrd_size);
+		phys_initrd_start = phys_initrd_size = 0;
+	}
+	if (phys_initrd_size &&
+	    memblock_is_region_reserved(phys_initrd_start, phys_initrd_size)) {
+		pr_err("INITRD: 0x%08llx+0x%08lx overlaps in-use memory region - disabling initrd\n",
+		       (u64)phys_initrd_start, phys_initrd_size);
+		phys_initrd_start = phys_initrd_size = 0;
+	}
+	if (phys_initrd_size) {
+		memblock_reserve(phys_initrd_start, phys_initrd_size);
+
+		/* Now convert initrd to virtual addresses */
 		initrd_start = __phys_to_virt(phys_initrd_start);
 		initrd_end = initrd_start + phys_initrd_size;
 	}
 #endif
 
-	BUG_ON(map_pg != bootmap_pfn + bootmap_pages);
+	arm_mm_memblock_reserve();
+
+	/* reserve any platform specific memblock areas */
+	if (mdesc->reserve)
+		mdesc->reserve();
+
+	early_init_fdt_scan_reserved_mem();
+
+	/*
+	 * reserve memory for DMA contiguous allocations,
+	 * must come from DMA area inside low memory
+	 */
+	dma_contiguous_reserve(min(arm_dma_limit, arm_lowmem_limit));
+
+	arm_memblock_steal_permitted = false;
+	memblock_dump_all();
 }
 
-/*
- * paging_init() sets up the page tables, initialises the zone memory
- * maps, and sets up the zero page, bad page and bad page tables.
- */
-void __init paging_init(struct meminfo *mi, struct machine_desc *mdesc)
+void __init bootmem_init(void)
 {
-	void *zero_page;
-	int node;
+	unsigned long min, max_low, max_high;
 
-	bootmem_init(mi);
+	memblock_allow_resize();
+	max_low = max_high = 0;
 
-	memcpy(&meminfo, mi, sizeof(meminfo));
+	find_limits(&min, &max_low, &max_high);
 
 	/*
-	 * allocate the zero page.  Note that we count on this going ok.
+	 * Sparsemem tries to allocate bootmem in memory_present(),
+	 * so must be done after the fixed reservations
 	 */
-	zero_page = alloc_bootmem_low_pages(PAGE_SIZE);
+	arm_memory_present();
 
 	/*
-	 * initialise the page tables.
+	 * sparse_init() needs the bootmem allocator up and running.
 	 */
-	memtable_init(mi);
-	if (mdesc->map_io)
-		mdesc->map_io();
-	local_flush_tlb_all();
+	sparse_init();
 
 	/*
-	 * initialise the zones within each node
+	 * Now free the memory - free_area_init_node needs
+	 * the sparse mem_map arrays initialized by sparse_init()
+	 * for memmap_init_zone(), otherwise all PFNs are invalid.
 	 */
-	for_each_online_node(node) {
-		unsigned long zone_size[MAX_NR_ZONES];
-		unsigned long zhole_size[MAX_NR_ZONES];
-		struct bootmem_data *bdata;
-		pg_data_t *pgdat;
-		int i;
-
-		/*
-		 * Initialise the zone size information.
-		 */
-		for (i = 0; i < MAX_NR_ZONES; i++) {
-			zone_size[i]  = 0;
-			zhole_size[i] = 0;
-		}
-
-		pgdat = NODE_DATA(node);
-		bdata = pgdat->bdata;
-
-		/*
-		 * The size of this node has already been determined.
-		 * If we need to do anything fancy with the allocation
-		 * of this memory to the zones, now is the time to do
-		 * it.
-		 */
-		zone_size[0] = bdata->node_low_pfn -
-				(bdata->node_boot_start >> PAGE_SHIFT);
-
-		/*
-		 * If this zone has zero size, skip it.
-		 */
-		if (!zone_size[0])
-			continue;
-
-		/*
-		 * For each bank in this node, calculate the size of the
-		 * holes.  holes = node_size - sum(bank_sizes_in_node)
-		 */
-		zhole_size[0] = zone_size[0];
-		for (i = 0; i < mi->nr_banks; i++) {
-			if (mi->bank[i].node != node)
-				continue;
-
-			zhole_size[0] -= mi->bank[i].size >> PAGE_SHIFT;
-		}
-
-		/*
-		 * Adjust the sizes according to any special
-		 * requirements for this machine type.
-		 */
-		arch_adjust_zones(node, zone_size, zhole_size);
-
-		free_area_init_node(node, pgdat, zone_size,
-				bdata->node_boot_start >> PAGE_SHIFT, zhole_size);
-	}
+	zone_sizes_init(min, max_low, max_high);
 
 	/*
-	 * finish off the bad pages once
-	 * the mem_map is initialised
+	 * This doesn't seem to be used by the Linux memory manager any
+	 * more, but is used by ll_rw_block.  If we can get rid of it, we
+	 * also get rid of some of the stuff above as well.
 	 */
-	memzero(zero_page, PAGE_SIZE);
-	empty_zero_page = virt_to_page(zero_page);
-	flush_dcache_page(empty_zero_page);
+	min_low_pfn = min;
+	max_low_pfn = max_low;
+	max_pfn = max_high;
 }
 
-static inline void free_area(unsigned long addr, unsigned long end, char *s)
+/*
+ * Poison init memory with an undefined instruction (ARM) or a branch to an
+ * undefined instruction (Thumb).
+ */
+static inline void poison_init_mem(void *s, size_t count)
 {
-	unsigned int size = (end - addr) >> 10;
-
-	for (; addr < end; addr += PAGE_SIZE) {
-		struct page *page = virt_to_page(addr);
-		ClearPageReserved(page);
-		set_page_count(page, 1);
-		free_page(addr);
-		totalram_pages++;
-	}
-
-	if (size && s)
-		printk(KERN_INFO "Freeing %s memory: %dK\n", s, size);
+	u32 *p = (u32 *)s;
+	for (; count != 0; count -= 4)
+		*p++ = 0xe7fddef0;
 }
 
 static inline void
-free_memmap(int node, unsigned long start_pfn, unsigned long end_pfn)
+free_memmap(unsigned long start_pfn, unsigned long end_pfn)
 {
 	struct page *start_pg, *end_pg;
-	unsigned long pg, pgend;
+	phys_addr_t pg, pgend;
 
 	/*
 	 * Convert start_pfn/end_pfn to a struct page pointer.
 	 */
-	start_pg = pfn_to_page(start_pfn);
-	end_pg = pfn_to_page(end_pfn);
+	start_pg = pfn_to_page(start_pfn - 1) + 1;
+	end_pg = pfn_to_page(end_pfn - 1) + 1;
 
 	/*
 	 * Convert to physical addresses, and
@@ -546,43 +400,116 @@ free_memmap(int node, unsigned long start_pfn, unsigned long end_pfn)
 	 * free the section of the memmap array.
 	 */
 	if (pg < pgend)
-		free_bootmem_node(NODE_DATA(node), pg, pgend - pg);
+		memblock_free_early(pg, pgend - pg);
 }
 
 /*
  * The mem_map array can get very big.  Free the unused area of the memory map.
  */
-static void __init free_unused_memmap_node(int node, struct meminfo *mi)
+static void __init free_unused_memmap(void)
 {
-	unsigned long bank_start, prev_bank_end = 0;
-	unsigned int i;
+	unsigned long start, prev_end = 0;
+	struct memblock_region *reg;
 
 	/*
-	 * [FIXME] This relies on each bank being in address order.  This
-	 * may not be the case, especially if the user has provided the
-	 * information on the command line.
+	 * This relies on each bank being in address order.
+	 * The banks are sorted previously in bootmem_init().
 	 */
-	for (i = 0; i < mi->nr_banks; i++) {
-		if (mi->bank[i].size == 0 || mi->bank[i].node != node)
-			continue;
-
-		bank_start = mi->bank[i].start >> PAGE_SHIFT;
-		if (bank_start < prev_bank_end) {
-			printk(KERN_ERR "MEM: unordered memory banks.  "
-				"Not freeing memmap.\n");
-			break;
-		}
+	for_each_memblock(memory, reg) {
+		start = memblock_region_memory_base_pfn(reg);
+#ifdef CONFIG_SPARSEMEM
+		/*
+		 * Take care not to free memmap entries that don't exist
+		 * due to SPARSEMEM sections which aren't present.
+		 */
+		start = min(start,
+				 ALIGN(prev_end, PAGES_PER_SECTION));
+#else
+		/*
+		 * Align down here since the VM subsystem insists that the
+		 * memmap entries are valid from the bank start aligned to
+		 * MAX_ORDER_NR_PAGES.
+		 */
+		start = round_down(start, MAX_ORDER_NR_PAGES);
+#endif
 
 		/*
 		 * If we had a previous bank, and there is a space
 		 * between the current bank and the previous, free it.
 		 */
-		if (prev_bank_end && prev_bank_end != bank_start)
-			free_memmap(node, prev_bank_end, bank_start);
+		if (prev_end && prev_end < start)
+			free_memmap(prev_end, start);
 
-		prev_bank_end = (mi->bank[i].start +
-				 mi->bank[i].size) >> PAGE_SHIFT;
+		/*
+		 * Align up here since the VM subsystem insists that the
+		 * memmap entries are valid from the bank end aligned to
+		 * MAX_ORDER_NR_PAGES.
+		 */
+		prev_end = ALIGN(memblock_region_memory_end_pfn(reg),
+				 MAX_ORDER_NR_PAGES);
 	}
+
+#ifdef CONFIG_SPARSEMEM
+	if (!IS_ALIGNED(prev_end, PAGES_PER_SECTION))
+		free_memmap(prev_end,
+			    ALIGN(prev_end, PAGES_PER_SECTION));
+#endif
+}
+
+#ifdef CONFIG_HIGHMEM
+static inline void free_area_high(unsigned long pfn, unsigned long end)
+{
+	for (; pfn < end; pfn++)
+		free_highmem_page(pfn_to_page(pfn));
+}
+#endif
+
+static void __init free_highpages(void)
+{
+#ifdef CONFIG_HIGHMEM
+	unsigned long max_low = max_low_pfn;
+	struct memblock_region *mem, *res;
+
+	/* set highmem page free */
+	for_each_memblock(memory, mem) {
+		unsigned long start = memblock_region_memory_base_pfn(mem);
+		unsigned long end = memblock_region_memory_end_pfn(mem);
+
+		/* Ignore complete lowmem entries */
+		if (end <= max_low)
+			continue;
+
+		/* Truncate partial highmem entries */
+		if (start < max_low)
+			start = max_low;
+
+		/* Find and exclude any reserved regions */
+		for_each_memblock(reserved, res) {
+			unsigned long res_start, res_end;
+
+			res_start = memblock_region_reserved_base_pfn(res);
+			res_end = memblock_region_reserved_end_pfn(res);
+
+			if (res_end < start)
+				continue;
+			if (res_start < start)
+				res_start = start;
+			if (res_start > end)
+				res_start = end;
+			if (res_end > end)
+				res_end = end;
+			if (res_start != start)
+				free_area_high(start, res_start);
+			start = res_end;
+			if (start == end)
+				break;
+		}
+
+		/* And now free anything which remains */
+		if (start < end)
+			free_area_high(start, end);
+	}
+#endif
+}
 
 /*
@@ -592,51 +519,92 @@ static void __init free_unused_memmap_node(int node, struct meminfo *mi)
  */
 void __init mem_init(void)
 {
-	unsigned int codepages, datapages, initpages;
-	int i, node;
+#ifdef CONFIG_HAVE_TCM
+	/* These pointers are filled in on TCM detection */
+	extern u32 dtcm_end;
+	extern u32 itcm_end;
+#endif
 
-	codepages = &_etext - &_text;
-	datapages = &_end - &__data_start;
-	initpages = &__init_end - &__init_begin;
+	set_max_mapnr(pfn_to_page(max_pfn) - mem_map);
 
-#ifndef CONFIG_DISCONTIGMEM
-	max_mapnr   = virt_to_page(high_memory) - mem_map;
+	/* this will put all unused low memory onto the freelists */
+	free_unused_memmap();
+	free_all_bootmem();
+
+#ifdef CONFIG_SA1111
+	/* now that our DMA memory is actually so designated, we can free it */
+	free_reserved_area(__va(PHYS_OFFSET), swapper_pg_dir, -1, NULL);
 #endif
 
-	/* this will put all unused low memory onto the freelists */
-	for_each_online_node(node) {
-		pg_data_t *pgdat = NODE_DATA(node);
+	free_highpages();
 
-		free_unused_memmap_node(node, &meminfo);
+	mem_init_print_info(NULL);
 
-		if (pgdat->node_spanned_pages != 0)
-			totalram_pages += free_all_bootmem_node(pgdat);
-	}
+#define MLK(b, t) b, t, ((t) - (b)) >> 10
+#define MLM(b, t) b, t, ((t) - (b)) >> 20
+#define MLK_ROUNDUP(b, t) b, t, DIV_ROUND_UP(((t) - (b)), SZ_1K)
 
-#ifdef CONFIG_SA1111
-	/* now that our DMA memory is actually so designated, we can free it */
-	free_area(PAGE_OFFSET, (unsigned long)swapper_pg_dir, NULL);
+	printk(KERN_NOTICE "Virtual kernel memory layout:\n"
+			"    vector  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
+#ifdef CONFIG_HAVE_TCM
+			"    DTCM    : 0x%08lx - 0x%08lx   (%4ld kB)\n"
+			"    ITCM    : 0x%08lx - 0x%08lx   (%4ld kB)\n"
+#endif
+			"    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
+			"    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
+			"    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB)\n"
+#ifdef CONFIG_HIGHMEM
+			"    pkmap   : 0x%08lx - 0x%08lx   (%4ld MB)\n"
+#endif
+#ifdef CONFIG_MODULES
+			"    modules : 0x%08lx - 0x%08lx   (%4ld MB)\n"
 #endif
+			"      .text : 0x%p" " - 0x%p" "   (%4d kB)\n"
+			"      .init : 0x%p" " - 0x%p" "   (%4d kB)\n"
+			"      .data : 0x%p" " - 0x%p" "   (%4d kB)\n"
+			"       .bss : 0x%p" " - 0x%p" "   (%4d kB)\n",
+
+			MLK(UL(CONFIG_VECTORS_BASE), UL(CONFIG_VECTORS_BASE) +
+				(PAGE_SIZE)),
+#ifdef CONFIG_HAVE_TCM
+			MLK(DTCM_OFFSET, (unsigned long) dtcm_end),
+			MLK(ITCM_OFFSET, (unsigned long) itcm_end),
+#endif
+			MLK(FIXADDR_START, FIXADDR_TOP),
+			MLM(VMALLOC_START, VMALLOC_END),
+			MLM(PAGE_OFFSET, (unsigned long)high_memory),
+#ifdef CONFIG_HIGHMEM
+			MLM(PKMAP_BASE, (PKMAP_BASE) + (LAST_PKMAP) *
+				(PAGE_SIZE)),
+#endif
+#ifdef CONFIG_MODULES
+			MLM(MODULES_VADDR, MODULES_END),
+#endif
+
+			MLK_ROUNDUP(_text, _etext),
+			MLK_ROUNDUP(__init_begin, __init_end),
+			MLK_ROUNDUP(_sdata, _edata),
+			MLK_ROUNDUP(__bss_start, __bss_stop));
+
+#undef MLK
+#undef MLM
+#undef MLK_ROUNDUP
 
 	/*
-	 * Since our memory may not be contiguous, calculate the
-	 * real number of pages we have in this system
+	 * Check boundaries twice: Some fundamental inconsistencies can
+	 * be detected at build time already.
 	 */
-	printk(KERN_INFO "Memory:");
-
-	num_physpages = 0;
-	for (i = 0; i < meminfo.nr_banks; i++) {
-		num_physpages += meminfo.bank[i].size >> PAGE_SHIFT;
-		printk(" %ldMB", meminfo.bank[i].size >> 20);
-	}
+#ifdef CONFIG_MMU
+	BUILD_BUG_ON(TASK_SIZE > MODULES_VADDR);
+	BUG_ON(TASK_SIZE > MODULES_VADDR);
+#endif
 
-	printk(" = %luMB total\n", num_physpages >> (20 - PAGE_SHIFT));
-	printk(KERN_NOTICE "Memory: %luKB available (%dK code, "
-		"%dK data, %dK init)\n",
-		(unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
-		codepages >> 10, datapages >> 10, initpages >> 10);
+#ifdef CONFIG_HIGHMEM
+	BUILD_BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > PAGE_OFFSET);
+	BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > PAGE_OFFSET);
+#endif
 
-	if (PAGE_SIZE >= 16384 && num_physpages <= 128) {
+	if (PAGE_SIZE >= 16384 && get_num_physpages() <= 128) {
 		extern int sysctl_overcommit_memory;
 		/*
 		 * On a machine this small we won't get
@@ -649,11 +617,16 @@ void __init mem_init(void)
 
 void free_initmem(void)
 {
-	if (!machine_is_integrator() && !machine_is_cintegrator()) {
-		free_area((unsigned long)(&__init_begin),
-			  (unsigned long)(&__init_end),
-			  "init");
-	}
+#ifdef CONFIG_HAVE_TCM
+	extern char __tcm_start, __tcm_end;
+
+	poison_init_mem(&__tcm_start, &__tcm_end - &__tcm_start);
+	free_reserved_area(&__tcm_start, &__tcm_end, -1, "TCM link");
+#endif
+
+	poison_init_mem(__init_begin, __init_end - __init_begin);
+	if (!machine_is_integrator() && !machine_is_cintegrator())
+		free_initmem_default(-1);
 }
 
 #ifdef CONFIG_BLK_DEV_INITRD
@@ -662,8 +635,10 @@ static int keep_initrd;
 
 void free_initrd_mem(unsigned long start, unsigned long end)
 {
-	if (!keep_initrd)
-		free_area(start, end, "initrd");
+	if (!keep_initrd) {
+		poison_init_mem((void *)start, PAGE_ALIGN(end) - start);
+		free_reserved_area((void *)start, (void *)end, -1, "initrd");
+	}
 }
 
 static int __init keepinitrd_setup(char *__unused)
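
Editor's note: the early_initrd() parser added above splits the "initrd=<start>,<size>" command-line option with memparse(), which reads a number with an optional K/M/G suffix and reports where parsing stopped. The following is a minimal standalone sketch of that split in userspace C, not kernel code: my_memparse() is a hypothetical stand-in for the kernel's memparse(), and the option string is an invented example value.

/*
 * Userspace sketch of early_initrd()'s "start,size" split.
 * my_memparse() mimics (does not reproduce) the kernel's memparse().
 */
#include <stdio.h>
#include <stdlib.h>

static unsigned long long my_memparse(const char *p, char **endp)
{
	unsigned long long v = strtoull(p, endp, 0);	/* base 0: "0x..." accepted */

	switch (**endp) {
	case 'G': case 'g': v <<= 10;	/* fall through */
	case 'M': case 'm': v <<= 10;	/* fall through */
	case 'K': case 'k': v <<= 10;
		(*endp)++;		/* consume the suffix character */
	}
	return v;
}

int main(void)
{
	const char *arg = "0x60800000,8M";	/* invented example option value */
	char *endp;
	unsigned long long start, size = 0;

	start = my_memparse(arg, &endp);
	if (*endp == ',')			/* same check early_initrd() makes */
		size = my_memparse(endp + 1, &endp);

	printf("phys_initrd_start = 0x%llx, phys_initrd_size = 0x%llx\n",
	       start, size);
	return 0;
}

Run against the example string, this prints start 0x60800000 and size 0x800000 (8 MB), the two values early_initrd() stores into phys_initrd_start and phys_initrd_size.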

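Likewise, the hole accounting added in zone_sizes_init() is easy to check by hand: each zone's hole count starts at the zone's full PFN span, and every memory bank that intersects the zone is subtracted back out, leaving only the gaps. A small standalone sketch under made-up bank values (the PFNs below are invented, not from any real board):

/* Userspace sketch of zone_sizes_init()'s lowmem hole computation. */
#include <stdio.h>

struct region { unsigned long base_pfn, end_pfn; };	/* [base, end) */

int main(void)
{
	/* invented layout: lowmem spans PFNs 0x60000..0x70000 */
	unsigned long zmin = 0x60000, max_low = 0x70000;
	struct region banks[] = {
		{ 0x60000, 0x64000 },		/* bank 0 */
		{ 0x68000, 0x70000 },		/* bank 1, after a gap */
	};
	unsigned long zhole = max_low - zmin;	/* start from the full span */
	unsigned long i;

	for (i = 0; i < sizeof(banks) / sizeof(banks[0]); i++) {
		unsigned long start = banks[i].base_pfn;
		unsigned long end = banks[i].end_pfn;

		/* clip each bank to the zone, as zone_sizes_init() does */
		if (start < max_low) {
			unsigned long low_end = end < max_low ? end : max_low;
			zhole -= low_end - start;
		}
	}

	/* prints 16384 (0x4000): the gap between the two banks */
	printf("lowmem hole: %lu pages\n", zhole);
	return 0;
}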