Diffstat (limited to 'arch/arm/mm/init.c')
| -rw-r--r-- | arch/arm/mm/init.c | 786 |
1 file changed, 385 insertions, 401 deletions
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index a04ffbbbe25..659c75d808d 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -13,38 +13,57 @@
 #include <linux/init.h>
 #include <linux/bootmem.h>
 #include <linux/mman.h>
+#include <linux/export.h>
 #include <linux/nodemask.h>
 #include <linux/initrd.h>
-#include <linux/sort.h>
+#include <linux/of_fdt.h>
 #include <linux/highmem.h>
+#include <linux/gfp.h>
+#include <linux/memblock.h>
+#include <linux/dma-contiguous.h>
+#include <linux/sizes.h>
 
+#include <asm/cp15.h>
 #include <asm/mach-types.h>
+#include <asm/memblock.h>
+#include <asm/prom.h>
 #include <asm/sections.h>
 #include <asm/setup.h>
-#include <asm/sizes.h>
 #include <asm/tlb.h>
+#include <asm/fixmap.h>
 
 #include <asm/mach/arch.h>
 #include <asm/mach/map.h>
 
 #include "mm.h"
 
-static unsigned long phys_initrd_start __initdata = 0;
+#ifdef CONFIG_CPU_CP15_MMU
+unsigned long __init __clear_cr(unsigned long mask)
+{
+	cr_alignment = cr_alignment & ~mask;
+	return cr_alignment;
+}
+#endif
+
+static phys_addr_t phys_initrd_start __initdata = 0;
 static unsigned long phys_initrd_size __initdata = 0;
 
-static void __init early_initrd(char **p)
+static int __init early_initrd(char *p)
 {
-	unsigned long start, size;
+	phys_addr_t start;
+	unsigned long size;
+	char *endp;
 
-	start = memparse(*p, p);
-	if (**p == ',') {
-		size = memparse((*p) + 1, p);
+	start = memparse(p, &endp);
+	if (*endp == ',') {
+		size = memparse(endp + 1, NULL);
 
 		phys_initrd_start = start;
 		phys_initrd_size = size;
 	}
+	return 0;
 }
-__early_param("initrd=", early_initrd);
+early_param("initrd", early_initrd);
 
 static int __init parse_tag_initrd(const struct tag *tag)
 {
@@ -71,46 +90,40 @@ __tagtable(ATAG_INITRD2, parse_tag_initrd2);
  * initialization functions, as well as show_mem() for the skipping
  * of holes in the memory map.  It is populated by arm_add_memory().
  */
-struct meminfo meminfo;
-
-void show_mem(void)
+void show_mem(unsigned int filter)
 {
 	int free = 0, total = 0, reserved = 0;
-	int shared = 0, cached = 0, slab = 0, node, i;
-	struct meminfo * mi = &meminfo;
+	int shared = 0, cached = 0, slab = 0;
+	struct memblock_region *reg;
 
 	printk("Mem-info:\n");
-	show_free_areas();
-	for_each_online_node(node) {
-		pg_data_t *n = NODE_DATA(node);
-		struct page *map = pgdat_page_nr(n, 0) - n->node_start_pfn;
-
-		for_each_nodebank (i,mi,node) {
-			struct membank *bank = &mi->bank[i];
-			unsigned int pfn1, pfn2;
-			struct page *page, *end;
-
-			pfn1 = bank_pfn_start(bank);
-			pfn2 = bank_pfn_end(bank);
-
-			page = map + pfn1;
-			end  = map + pfn2;
-
-			do {
-				total++;
-				if (PageReserved(page))
-					reserved++;
-				else if (PageSwapCache(page))
-					cached++;
-				else if (PageSlab(page))
-					slab++;
-				else if (!page_count(page))
-					free++;
-				else
-					shared += page_count(page) - 1;
-				page++;
-			} while (page < end);
-		}
+	show_free_areas(filter);
+
+	for_each_memblock (memory, reg) {
+		unsigned int pfn1, pfn2;
+		struct page *page, *end;
+
+		pfn1 = memblock_region_memory_base_pfn(reg);
+		pfn2 = memblock_region_memory_end_pfn(reg);
+
+		page = pfn_to_page(pfn1);
+		end  = pfn_to_page(pfn2 - 1) + 1;
+
+		do {
+			total++;
+			if (PageReserved(page))
+				reserved++;
+			else if (PageSwapCache(page))
+				cached++;
+			else if (PageSlab(page))
+				slab++;
+			else if (!page_count(page))
+				free++;
+			else
+				shared += page_count(page) - 1;
+			pfn1++;
+			page = pfn_to_page(pfn1);
+		} while (pfn1 < pfn2);
 	}
 
 	printk("%d pages of RAM\n", total);
@@ -121,205 +134,68 @@ void show_mem(void)
 	printk("%d pages swap cached\n", cached);
 }
 
-static void __init find_node_limits(int node, struct meminfo *mi,
-	unsigned long *min, unsigned long *max_low, unsigned long *max_high)
+static void __init find_limits(unsigned long *min, unsigned long *max_low,
+			       unsigned long *max_high)
 {
-	int i;
-
-	*min = -1UL;
-	*max_low = *max_high = 0;
-
-	for_each_nodebank(i, mi, node) {
-		struct membank *bank = &mi->bank[i];
-		unsigned long start, end;
+	*max_low = PFN_DOWN(memblock_get_current_limit());
+	*min = PFN_UP(memblock_start_of_DRAM());
+	*max_high = PFN_DOWN(memblock_end_of_DRAM());
+}
 
-		start = bank_pfn_start(bank);
-		end = bank_pfn_end(bank);
+#ifdef CONFIG_ZONE_DMA
 
-		if (*min > start)
-			*min = start;
-		if (*max_high < end)
-			*max_high = end;
-		if (bank->highmem)
-			continue;
-		if (*max_low < end)
-			*max_low = end;
-	}
-}
+phys_addr_t arm_dma_zone_size __read_mostly;
+EXPORT_SYMBOL(arm_dma_zone_size);
 
 /*
- * FIXME: We really want to avoid allocating the bootmap bitmap
- * over the top of the initrd.  Hopefully, this is located towards
- * the start of a bank, so if we allocate the bootmap bitmap at
- * the end, we won't clash.
+ * The DMA mask corresponding to the maximum bus address allocatable
+ * using GFP_DMA. The default here places no restriction on DMA
+ * allocations. This must be the smallest DMA mask in the system,
+ * so a successful GFP_DMA allocation will always satisfy this.
 */
-static unsigned int __init
-find_bootmap_pfn(int node, struct meminfo *mi, unsigned int bootmap_pages)
-{
-	unsigned int start_pfn, i, bootmap_pfn;
-
-	start_pfn   = PAGE_ALIGN(__pa(_end)) >> PAGE_SHIFT;
-	bootmap_pfn = 0;
-
-	for_each_nodebank(i, mi, node) {
-		struct membank *bank = &mi->bank[i];
-		unsigned int start, end;
-
-		start = bank_pfn_start(bank);
-		end   = bank_pfn_end(bank);
-
-		if (end < start_pfn)
-			continue;
-
-		if (start < start_pfn)
-			start = start_pfn;
-
-		if (end <= start)
-			continue;
-
-		if (end - start >= bootmap_pages) {
-			bootmap_pfn = start;
-			break;
-		}
-	}
-
-	if (bootmap_pfn == 0)
-		BUG();
-
-	return bootmap_pfn;
-}
+phys_addr_t arm_dma_limit;
+unsigned long arm_dma_pfn_limit;
 
-static int __init check_initrd(struct meminfo *mi)
+static void __init arm_adjust_dma_zone(unsigned long *size, unsigned long *hole,
+	unsigned long dma_size)
 {
-	int initrd_node = -2;
-#ifdef CONFIG_BLK_DEV_INITRD
-	unsigned long end = phys_initrd_start + phys_initrd_size;
-
-	/*
-	 * Make sure that the initrd is within a valid area of
-	 * memory.
-	 */
-	if (phys_initrd_size) {
-		unsigned int i;
-
-		initrd_node = -1;
-
-		for (i = 0; i < mi->nr_banks; i++) {
-			struct membank *bank = &mi->bank[i];
-			if (bank_phys_start(bank) <= phys_initrd_start &&
-			    end <= bank_phys_end(bank))
-				initrd_node = bank->node;
-		}
-	}
-
-	if (initrd_node == -1) {
-		printk(KERN_ERR "INITRD: 0x%08lx+0x%08lx extends beyond "
-		       "physical memory - disabling initrd\n",
-		       phys_initrd_start, phys_initrd_size);
-		phys_initrd_start = phys_initrd_size = 0;
-	}
-#endif
+	if (size[0] <= dma_size)
+		return;
 
-	return initrd_node;
+	size[ZONE_NORMAL] = size[0] - dma_size;
+	size[ZONE_DMA] = dma_size;
+	hole[ZONE_NORMAL] = hole[0];
+	hole[ZONE_DMA] = 0;
 }
-
-static inline void map_memory_bank(struct membank *bank)
-{
-#ifdef CONFIG_MMU
-	struct map_desc map;
-
-	map.pfn = bank_pfn_start(bank);
-	map.virtual = __phys_to_virt(bank_phys_start(bank));
-	map.length = bank_phys_size(bank);
-	map.type = MT_MEMORY;
-
-	create_mapping(&map);
 #endif
-}
 
-static void __init bootmem_init_node(int node, struct meminfo *mi,
-	unsigned long start_pfn, unsigned long end_pfn)
+void __init setup_dma_zone(const struct machine_desc *mdesc)
 {
-	unsigned long boot_pfn;
-	unsigned int boot_pages;
-	pg_data_t *pgdat;
-	int i;
-
-	/*
-	 * Map the memory banks for this node.
-	 */
-	for_each_nodebank(i, mi, node) {
-		struct membank *bank = &mi->bank[i];
-
-		if (!bank->highmem)
-			map_memory_bank(bank);
-	}
-
-	/*
-	 * Allocate the bootmem bitmap page.
-	 */
-	boot_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
-	boot_pfn = find_bootmap_pfn(node, mi, boot_pages);
-
-	/*
-	 * Initialise the bootmem allocator for this node, handing the
-	 * memory banks over to bootmem.
-	 */
-	node_set_online(node);
-	pgdat = NODE_DATA(node);
-	init_bootmem_node(pgdat, boot_pfn, start_pfn, end_pfn);
-
-	for_each_nodebank(i, mi, node) {
-		struct membank *bank = &mi->bank[i];
-		if (!bank->highmem)
-			free_bootmem_node(pgdat, bank_phys_start(bank), bank_phys_size(bank));
-	}
-
-	/*
-	 * Reserve the bootmem bitmap for this node.
-	 */
-	reserve_bootmem_node(pgdat, boot_pfn << PAGE_SHIFT,
-			     boot_pages << PAGE_SHIFT, BOOTMEM_DEFAULT);
-}
-
-static void __init bootmem_reserve_initrd(int node)
-{
-#ifdef CONFIG_BLK_DEV_INITRD
-	pg_data_t *pgdat = NODE_DATA(node);
-	int res;
-
-	res = reserve_bootmem_node(pgdat, phys_initrd_start,
-			     phys_initrd_size, BOOTMEM_EXCLUSIVE);
-
-	if (res == 0) {
-		initrd_start = __phys_to_virt(phys_initrd_start);
-		initrd_end = initrd_start + phys_initrd_size;
-	} else {
-		printk(KERN_ERR
-			"INITRD: 0x%08lx+0x%08lx overlaps in-use "
-			"memory region - disabling initrd\n",
-			phys_initrd_start, phys_initrd_size);
-	}
+#ifdef CONFIG_ZONE_DMA
+	if (mdesc->dma_zone_size) {
+		arm_dma_zone_size = mdesc->dma_zone_size;
+		arm_dma_limit = PHYS_OFFSET + arm_dma_zone_size - 1;
+	} else
+		arm_dma_limit = 0xffffffff;
+	arm_dma_pfn_limit = arm_dma_limit >> PAGE_SHIFT;
 #endif
 }
 
-static void __init bootmem_free_node(int node, struct meminfo *mi)
+static void __init zone_sizes_init(unsigned long min, unsigned long max_low,
+	unsigned long max_high)
 {
 	unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES];
-	unsigned long min, max_low, max_high;
-	int i;
-
-	find_node_limits(node, mi, &min, &max_low, &max_high);
+	struct memblock_region *reg;
 
 	/*
-	 * initialise the zones within this node.
+	 * initialise the zones.
	 */
 	memset(zone_size, 0, sizeof(zone_size));
 
 	/*
-	 * The size of this node has already been determined.  If we need
-	 * to do anything fancy with the allocation of this memory to the
-	 * zones, now is the time to do it.
+	 * The memory size has already been determined.  If we need
+	 * to do anything fancy with the allocation of this memory
+	 * to the zones, now is the time to do it.
	 */
 	zone_size[0] = max_low - min;
 #ifdef CONFIG_HIGHMEM
@@ -327,125 +203,145 @@ static void __init bootmem_free_node(int node, struct meminfo *mi)
 #endif
 
 	/*
-	 * For each bank in this node, calculate the size of the holes.
-	 *  holes = node_size - sum(bank_sizes_in_node)
+	 * Calculate the size of the holes.
+	 *  holes = node_size - sum(bank_sizes)
	 */
 	memcpy(zhole_size, zone_size, sizeof(zhole_size));
-	for_each_nodebank(i, mi, node) {
-		int idx = 0;
+	for_each_memblock(memory, reg) {
+		unsigned long start = memblock_region_memory_base_pfn(reg);
+		unsigned long end = memblock_region_memory_end_pfn(reg);
+
+		if (start < max_low) {
+			unsigned long low_end = min(end, max_low);
+			zhole_size[0] -= low_end - start;
+		}
 #ifdef CONFIG_HIGHMEM
-		if (mi->bank[i].highmem)
-			idx = ZONE_HIGHMEM;
+		if (end > max_low) {
+			unsigned long high_start = max(start, max_low);
+			zhole_size[ZONE_HIGHMEM] -= end - high_start;
+		}
 #endif
-		zhole_size[idx] -= bank_pfn_size(&mi->bank[i]);
 	}
 
+#ifdef CONFIG_ZONE_DMA
 	/*
 	 * Adjust the sizes according to any special requirements for
 	 * this machine type.
	 */
-	arch_adjust_zones(node, zone_size, zhole_size);
+	if (arm_dma_zone_size)
+		arm_adjust_dma_zone(zone_size, zhole_size,
+			arm_dma_zone_size >> PAGE_SHIFT);
+#endif
 
-	free_area_init_node(node, zone_size, min, zhole_size);
+	free_area_init_node(0, zone_size, min, zhole_size);
 }
 
-#ifndef CONFIG_SPARSEMEM
+#ifdef CONFIG_HAVE_ARCH_PFN_VALID
 int pfn_valid(unsigned long pfn)
 {
-	struct meminfo *mi = &meminfo;
-	unsigned int left = 0, right = mi->nr_banks;
-
-	do {
-		unsigned int mid = (right + left) / 2;
-		struct membank *bank = &mi->bank[mid];
-
-		if (pfn < bank_pfn_start(bank))
-			right = mid;
-		else if (pfn >= bank_pfn_end(bank))
-			left = mid + 1;
-		else
-			return 1;
-	} while (left < right);
-	return 0;
+	return memblock_is_memory(__pfn_to_phys(pfn));
 }
 EXPORT_SYMBOL(pfn_valid);
+#endif
 
-static void arm_memory_present(struct meminfo *mi, int node)
+#ifndef CONFIG_SPARSEMEM
+static void __init arm_memory_present(void)
 {
 }
 #else
-static void arm_memory_present(struct meminfo *mi, int node)
+static void __init arm_memory_present(void)
 {
-	int i;
-	for_each_nodebank(i, mi, node) {
-		struct membank *bank = &mi->bank[i];
-		memory_present(node, bank_pfn_start(bank), bank_pfn_end(bank));
-	}
+	struct memblock_region *reg;
+
+	for_each_memblock(memory, reg)
+		memory_present(0, memblock_region_memory_base_pfn(reg),
+			       memblock_region_memory_end_pfn(reg));
 }
 #endif
 
-static int __init meminfo_cmp(const void *_a, const void *_b)
+static bool arm_memblock_steal_permitted = true;
+
+phys_addr_t __init arm_memblock_steal(phys_addr_t size, phys_addr_t align)
 {
-	const struct membank *a = _a, *b = _b;
-	long cmp = bank_pfn_start(a) - bank_pfn_start(b);
-	return cmp < 0 ? -1 : cmp > 0 ? 1 : 0;
+	phys_addr_t phys;
+
+	BUG_ON(!arm_memblock_steal_permitted);
+
+	phys = memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ANYWHERE);
+	memblock_free(phys, size);
+	memblock_remove(phys, size);
+
+	return phys;
 }
 
-void __init bootmem_init(void)
+void __init arm_memblock_init(const struct machine_desc *mdesc)
 {
-	struct meminfo *mi = &meminfo;
-	unsigned long min, max_low, max_high;
-	int node, initrd_node;
+	/* Register the kernel text, kernel data and initrd with memblock. */
+#ifdef CONFIG_XIP_KERNEL
+	memblock_reserve(__pa(_sdata), _end - _sdata);
+#else
+	memblock_reserve(__pa(_stext), _end - _stext);
+#endif
+#ifdef CONFIG_BLK_DEV_INITRD
+	/* FDT scan will populate initrd_start */
+	if (initrd_start && !phys_initrd_size) {
+		phys_initrd_start = __virt_to_phys(initrd_start);
+		phys_initrd_size = initrd_end - initrd_start;
+	}
+	initrd_start = initrd_end = 0;
+	if (phys_initrd_size &&
+	    !memblock_is_region_memory(phys_initrd_start, phys_initrd_size)) {
+		pr_err("INITRD: 0x%08llx+0x%08lx is not a memory region - disabling initrd\n",
+		       (u64)phys_initrd_start, phys_initrd_size);
+		phys_initrd_start = phys_initrd_size = 0;
+	}
+	if (phys_initrd_size &&
+	    memblock_is_region_reserved(phys_initrd_start, phys_initrd_size)) {
+		pr_err("INITRD: 0x%08llx+0x%08lx overlaps in-use memory region - disabling initrd\n",
+		       (u64)phys_initrd_start, phys_initrd_size);
+		phys_initrd_start = phys_initrd_size = 0;
+	}
+	if (phys_initrd_size) {
+		memblock_reserve(phys_initrd_start, phys_initrd_size);
 
-	sort(&mi->bank, mi->nr_banks, sizeof(mi->bank[0]), meminfo_cmp, NULL);
+		/* Now convert initrd to virtual addresses */
+		initrd_start = __phys_to_virt(phys_initrd_start);
+		initrd_end = initrd_start + phys_initrd_size;
+	}
+#endif
 
-	/*
-	 * Locate which node contains the ramdisk image, if any.
-	 */
-	initrd_node = check_initrd(mi);
+	arm_mm_memblock_reserve();
 
-	max_low = max_high = 0;
+	/* reserve any platform specific memblock areas */
+	if (mdesc->reserve)
+		mdesc->reserve();
+
+	early_init_fdt_scan_reserved_mem();
 
 	/*
-	 * Run through each node initialising the bootmem allocator.
+	 * reserve memory for DMA contigouos allocations,
+	 * must come from DMA area inside low memory
	 */
-	for_each_node(node) {
-		unsigned long node_low, node_high;
-
-		find_node_limits(node, mi, &min, &node_low, &node_high);
-
-		if (node_low > max_low)
-			max_low = node_low;
-		if (node_high > max_high)
-			max_high = node_high;
+	dma_contiguous_reserve(min(arm_dma_limit, arm_lowmem_limit));
 
-		/*
-		 * If there is no memory in this node, ignore it.
-		 * (We can't have nodes which have no lowmem)
-		 */
-		if (node_low == 0)
-			continue;
+	arm_memblock_steal_permitted = false;
+	memblock_dump_all();
+}
 
-		bootmem_init_node(node, mi, min, node_low);
+void __init bootmem_init(void)
+{
+	unsigned long min, max_low, max_high;
 
-		/*
-		 * Reserve any special node zero regions.
-		 */
-		if (node == 0)
-			reserve_node_zero(NODE_DATA(node));
+	memblock_allow_resize();
+	max_low = max_high = 0;
 
-		/*
-		 * If the initrd is in this node, reserve its memory.
-		 */
-		if (node == initrd_node)
-			bootmem_reserve_initrd(node);
+	find_limits(&min, &max_low, &max_high);
 
-		/*
-		 * Sparsemem tries to allocate bootmem in memory_present(),
-		 * so must be done after the fixed reservations
-		 */
-		arm_memory_present(mi, node);
-	}
+	/*
+	 * Sparsemem tries to allocate bootmem in memory_present(),
+	 * so must be done after the fixed reservations
	 */
+	arm_memory_present();
 
 	/*
 	 * sparse_init() needs the bootmem allocator up and running.
@@ -453,56 +349,44 @@ void __init bootmem_init(void)
 	sparse_init();
 
 	/*
-	 * Now free memory in each node - free_area_init_node needs
+	 * Now free the memory - free_area_init_node needs
 	 * the sparse mem_map arrays initialized by sparse_init()
 	 * for memmap_init_zone(), otherwise all PFNs are invalid.
	 */
-	for_each_node(node)
-		bootmem_free_node(node, mi);
-
-	high_memory = __va((max_low << PAGE_SHIFT) - 1) + 1;
+	zone_sizes_init(min, max_low, max_high);
 
 	/*
 	 * This doesn't seem to be used by the Linux memory manager any
 	 * more, but is used by ll_rw_block.  If we can get rid of it, we
 	 * also get rid of some of the stuff above as well.
-	 *
-	 * Note: max_low_pfn and max_pfn reflect the number of _pages_ in
-	 * the system, not the maximum PFN.
	 */
-	max_low_pfn = max_low - PHYS_PFN_OFFSET;
-	max_pfn = max_high - PHYS_PFN_OFFSET;
+	min_low_pfn = min;
+	max_low_pfn = max_low;
+	max_pfn = max_high;
 }
 
-static inline int free_area(unsigned long pfn, unsigned long end, char *s)
+/*
+ * Poison init memory with an undefined instruction (ARM) or a branch to an
+ * undefined instruction (Thumb).
+ */
+static inline void poison_init_mem(void *s, size_t count)
 {
-	unsigned int pages = 0, size = (end - pfn) << (PAGE_SHIFT - 10);
-
-	for (; pfn < end; pfn++) {
-		struct page *page = pfn_to_page(pfn);
-		ClearPageReserved(page);
-		init_page_count(page);
-		__free_page(page);
-		pages++;
-	}
-
-	if (size && s)
-		printk(KERN_INFO "Freeing %s memory: %dK\n", s, size);
-
-	return pages;
+	u32 *p = (u32 *)s;
+	for (; count != 0; count -= 4)
+		*p++ = 0xe7fddef0;
 }
 
 static inline void
-free_memmap(int node, unsigned long start_pfn, unsigned long end_pfn)
+free_memmap(unsigned long start_pfn, unsigned long end_pfn)
 {
 	struct page *start_pg, *end_pg;
-	unsigned long pg, pgend;
+	phys_addr_t pg, pgend;
 
 	/*
 	 * Convert start_pfn/end_pfn to a struct page pointer.
	 */
 	start_pg = pfn_to_page(start_pfn - 1) + 1;
-	end_pg = pfn_to_page(end_pfn);
+	end_pg = pfn_to_page(end_pfn - 1) + 1;
 
 	/*
 	 * Convert to physical addresses, and
@@ -516,41 +400,116 @@ free_memmap(unsigned long start_pfn, unsigned long end_pfn)
 	 * free the section of the memmap array.
	 */
 	if (pg < pgend)
-		free_bootmem_node(NODE_DATA(node), pg, pgend - pg);
+		memblock_free_early(pg, pgend - pg);
 }
 
 /*
 * The mem_map array can get very big.  Free the unused area of the memory map.
 */
-static void __init free_unused_memmap_node(int node, struct meminfo *mi)
+static void __init free_unused_memmap(void)
 {
-	unsigned long bank_start, prev_bank_end = 0;
-	unsigned int i;
+	unsigned long start, prev_end = 0;
+	struct memblock_region *reg;
 
 	/*
-	 * [FIXME] This relies on each bank being in address order.  This
-	 * may not be the case, especially if the user has provided the
-	 * information on the command line.
+	 * This relies on each bank being in address order.
+	 * The banks are sorted previously in bootmem_init().
	 */
-	for_each_nodebank(i, mi, node) {
-		struct membank *bank = &mi->bank[i];
-
-		bank_start = bank_pfn_start(bank);
-		if (bank_start < prev_bank_end) {
-			printk(KERN_ERR "MEM: unordered memory banks.  "
-				"Not freeing memmap.\n");
-			break;
-		}
+	for_each_memblock(memory, reg) {
+		start = memblock_region_memory_base_pfn(reg);
+#ifdef CONFIG_SPARSEMEM
+		/*
+		 * Take care not to free memmap entries that don't exist
+		 * due to SPARSEMEM sections which aren't present.
+		 */
+		start = min(start,
+				 ALIGN(prev_end, PAGES_PER_SECTION));
+#else
+		/*
+		 * Align down here since the VM subsystem insists that the
+		 * memmap entries are valid from the bank start aligned to
+		 * MAX_ORDER_NR_PAGES.
+		 */
+		start = round_down(start, MAX_ORDER_NR_PAGES);
+#endif
 
 		/*
 		 * If we had a previous bank, and there is a space
 		 * between the current bank and the previous, free it.
		 */
-		if (prev_bank_end && prev_bank_end != bank_start)
-			free_memmap(node, prev_bank_end, bank_start);
+		if (prev_end && prev_end < start)
+			free_memmap(prev_end, start);
 
-		prev_bank_end = bank_pfn_end(bank);
+		/*
+		 * Align up here since the VM subsystem insists that the
+		 * memmap entries are valid from the bank end aligned to
+		 * MAX_ORDER_NR_PAGES.
+		 */
+		prev_end = ALIGN(memblock_region_memory_end_pfn(reg),
+				 MAX_ORDER_NR_PAGES);
 	}
+
+#ifdef CONFIG_SPARSEMEM
+	if (!IS_ALIGNED(prev_end, PAGES_PER_SECTION))
+		free_memmap(prev_end,
+			    ALIGN(prev_end, PAGES_PER_SECTION));
+#endif
+}
+
+#ifdef CONFIG_HIGHMEM
+static inline void free_area_high(unsigned long pfn, unsigned long end)
+{
+	for (; pfn < end; pfn++)
+		free_highmem_page(pfn_to_page(pfn));
+}
+#endif
+
+static void __init free_highpages(void)
+{
+#ifdef CONFIG_HIGHMEM
+	unsigned long max_low = max_low_pfn;
+	struct memblock_region *mem, *res;
+
+	/* set highmem page free */
+	for_each_memblock(memory, mem) {
+		unsigned long start = memblock_region_memory_base_pfn(mem);
+		unsigned long end = memblock_region_memory_end_pfn(mem);
+
+		/* Ignore complete lowmem entries */
+		if (end <= max_low)
+			continue;
+
+		/* Truncate partial highmem entries */
+		if (start < max_low)
+			start = max_low;
+
+		/* Find and exclude any reserved regions */
+		for_each_memblock(reserved, res) {
+			unsigned long res_start, res_end;
+
+			res_start = memblock_region_reserved_base_pfn(res);
+			res_end = memblock_region_reserved_end_pfn(res);
+
+			if (res_end < start)
+				continue;
+			if (res_start < start)
+				res_start = start;
+			if (res_start > end)
+				res_start = end;
+			if (res_end > end)
+				res_end = end;
+			if (res_start != start)
+				free_area_high(start, res_start);
+			start = res_end;
+			if (start == end)
+				break;
+		}
+
+		/* And now free anything which remains */
+		if (start < end)
+			free_area_high(start, end);
+	}
+#endif
 }
 
 /*
@@ -560,65 +519,92 @@ static void __init free_unused_memmap(void)
 */
 void __init mem_init(void)
 {
-	unsigned int codesize, datasize, initsize;
-	int i, node;
-
-#ifndef CONFIG_DISCONTIGMEM
-	max_mapnr   = pfn_to_page(max_pfn + PHYS_PFN_OFFSET) - mem_map;
+#ifdef CONFIG_HAVE_TCM
+	/* These pointers are filled in on TCM detection */
+	extern u32 dtcm_end;
+	extern u32 itcm_end;
 #endif
 
-	/* this will put all unused low memory onto the freelists */
-	for_each_online_node(node) {
-		pg_data_t *pgdat = NODE_DATA(node);
-
-		free_unused_memmap_node(node, &meminfo);
+	set_max_mapnr(pfn_to_page(max_pfn) - mem_map);
 
-		if (pgdat->node_spanned_pages != 0)
-			totalram_pages += free_all_bootmem_node(pgdat);
-	}
+	/* this will put all unused low memory onto the freelists */
+	free_unused_memmap();
+	free_all_bootmem();
 
 #ifdef CONFIG_SA1111
 	/* now that our DMA memory is actually so designated, we can free it */
-	totalram_pages += free_area(PHYS_PFN_OFFSET,
-				    __phys_to_pfn(__pa(swapper_pg_dir)), NULL);
+	free_reserved_area(__va(PHYS_OFFSET), swapper_pg_dir, -1, NULL);
 #endif
 
+	free_highpages();
+
+	mem_init_print_info(NULL);
+
+#define MLK(b, t) b, t, ((t) - (b)) >> 10
+#define MLM(b, t) b, t, ((t) - (b)) >> 20
+#define MLK_ROUNDUP(b, t) b, t, DIV_ROUND_UP(((t) - (b)), SZ_1K)
+
+	printk(KERN_NOTICE "Virtual kernel memory layout:\n"
+			"    vector  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
+#ifdef CONFIG_HAVE_TCM
+			"    DTCM    : 0x%08lx - 0x%08lx   (%4ld kB)\n"
+			"    ITCM    : 0x%08lx - 0x%08lx   (%4ld kB)\n"
+#endif
+			"    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
+			"    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
+			"    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB)\n"
 #ifdef CONFIG_HIGHMEM
-	/* set highmem page free */
-	for_each_online_node(node) {
-		for_each_nodebank (i, &meminfo, node) {
-			unsigned long start = bank_pfn_start(&meminfo.bank[i]);
-			unsigned long end = bank_pfn_end(&meminfo.bank[i]);
-			if (start >= max_low_pfn + PHYS_PFN_OFFSET)
-				totalhigh_pages += free_area(start, end, NULL);
-		}
-	}
-	totalram_pages += totalhigh_pages;
+			"    pkmap   : 0x%08lx - 0x%08lx   (%4ld MB)\n"
+#endif
+#ifdef CONFIG_MODULES
+			"    modules : 0x%08lx - 0x%08lx   (%4ld MB)\n"
 #endif
+			"      .text : 0x%p" " - 0x%p" "   (%4d kB)\n"
+			"      .init : 0x%p" " - 0x%p" "   (%4d kB)\n"
+			"      .data : 0x%p" " - 0x%p" "   (%4d kB)\n"
+			"       .bss : 0x%p" " - 0x%p" "   (%4d kB)\n",
+
+			MLK(UL(CONFIG_VECTORS_BASE), UL(CONFIG_VECTORS_BASE) +
+				(PAGE_SIZE)),
+#ifdef CONFIG_HAVE_TCM
+			MLK(DTCM_OFFSET, (unsigned long) dtcm_end),
+			MLK(ITCM_OFFSET, (unsigned long) itcm_end),
+#endif
+			MLK(FIXADDR_START, FIXADDR_TOP),
+			MLM(VMALLOC_START, VMALLOC_END),
+			MLM(PAGE_OFFSET, (unsigned long)high_memory),
+#ifdef CONFIG_HIGHMEM
+			MLM(PKMAP_BASE, (PKMAP_BASE) + (LAST_PKMAP) *
+				(PAGE_SIZE)),
+#endif
+#ifdef CONFIG_MODULES
+			MLM(MODULES_VADDR, MODULES_END),
+#endif
+
+			MLK_ROUNDUP(_text, _etext),
+			MLK_ROUNDUP(__init_begin, __init_end),
+			MLK_ROUNDUP(_sdata, _edata),
+			MLK_ROUNDUP(__bss_start, __bss_stop));
+
+#undef MLK
+#undef MLM
+#undef MLK_ROUNDUP
 
 	/*
-	 * Since our memory may not be contiguous, calculate the
-	 * real number of pages we have in this system
+	 * Check boundaries twice: Some fundamental inconsistencies can
+	 * be detected at build time already.
	 */
-	printk(KERN_INFO "Memory:");
-	num_physpages = 0;
-	for (i = 0; i < meminfo.nr_banks; i++) {
-		num_physpages += bank_pfn_size(&meminfo.bank[i]);
-		printk(" %ldMB", bank_phys_size(&meminfo.bank[i]) >> 20);
-	}
-	printk(" = %luMB total\n", num_physpages >> (20 - PAGE_SHIFT));
-
-	codesize = _etext - _text;
-	datasize = _end - _data;
-	initsize = __init_end - __init_begin;
+#ifdef CONFIG_MMU
+	BUILD_BUG_ON(TASK_SIZE > MODULES_VADDR);
+	BUG_ON(TASK_SIZE > MODULES_VADDR);
+#endif
 
-	printk(KERN_NOTICE "Memory: %luKB available (%dK code, "
-		"%dK data, %dK init, %luK highmem)\n",
-		nr_free_pages() << (PAGE_SHIFT-10), codesize >> 10,
-		datasize >> 10, initsize >> 10,
-		totalhigh_pages << (PAGE_SHIFT-10));
+#ifdef CONFIG_HIGHMEM
+	BUILD_BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > PAGE_OFFSET);
+	BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > PAGE_OFFSET);
+#endif
 
-	if (PAGE_SIZE >= 16384 && num_physpages <= 128) {
+	if (PAGE_SIZE >= 16384 && get_num_physpages() <= 128) {
 		extern int sysctl_overcommit_memory;
 		/*
 		 * On a machine this small we won't get
@@ -632,17 +618,15 @@ void __init mem_init(void)
 
 void free_initmem(void)
 {
 #ifdef CONFIG_HAVE_TCM
-	extern char *__tcm_start, *__tcm_end;
+	extern char __tcm_start, __tcm_end;
 
-	totalram_pages += free_area(__phys_to_pfn(__pa(__tcm_start)),
-				    __phys_to_pfn(__pa(__tcm_end)),
-				    "TCM link");
+	poison_init_mem(&__tcm_start, &__tcm_end - &__tcm_start);
+	free_reserved_area(&__tcm_start, &__tcm_end, -1, "TCM link");
 #endif
 
+	poison_init_mem(__init_begin, __init_end - __init_begin);
 	if (!machine_is_integrator() && !machine_is_cintegrator())
-		totalram_pages += free_area(__phys_to_pfn(__pa(__init_begin)),
-					    __phys_to_pfn(__pa(__init_end)),
-					    "init");
+		free_initmem_default(-1);
 }
 
 #ifdef CONFIG_BLK_DEV_INITRD
@@ -651,10 +635,10 @@ static int keep_initrd;
 
 void free_initrd_mem(unsigned long start, unsigned long end)
 {
-	if (!keep_initrd)
-		totalram_pages += free_area(__phys_to_pfn(__pa(start)),
-					    __phys_to_pfn(__pa(end)),
-					    "initrd");
+	if (!keep_initrd) {
+		poison_init_mem((void *)start, PAGE_ALIGN(end) - start);
+		free_reserved_area((void *)start, (void *)end, -1, "initrd");
+	}
 }
 
 static int __init keepinitrd_setup(char *__unused)
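
Editor's note on the zone-sizing change: the new zone_sizes_init() starts each zone as a full span (max_low - min for lowmem, max_high - max_low for highmem) and then walks the memblock memory regions, subtracting each region's pages from the matching zone; whatever is left over is the hole size passed to free_area_init_node(). The arithmetic is easy to check in isolation. Below is a minimal userspace C sketch (not kernel code; the region layout and PFN values are invented for illustration) that mirrors that hole accounting:

	/* Sketch of the hole accounting in the new zone_sizes_init().
	 * Regions and limits are hypothetical PFN values. */
	#include <stdio.h>

	struct region { unsigned long start, end; };	/* PFN range [start, end) */

	int main(void)
	{
		/* two made-up memory regions with a gap between them */
		struct region mem[] = { { 0x60000, 0x68000 }, { 0x70000, 0x80000 } };
		unsigned long min = 0x60000, max_low = 0x78000, max_high = 0x80000;
		unsigned long zone_size[2], zhole_size[2];
		unsigned int i;

		zone_size[0] = max_low - min;		/* lowmem span */
		zone_size[1] = max_high - max_low;	/* highmem span */
		zhole_size[0] = zone_size[0];		/* assume all holes, then */
		zhole_size[1] = zone_size[1];		/* subtract real memory   */

		for (i = 0; i < 2; i++) {
			unsigned long start = mem[i].start, end = mem[i].end;

			/* pages below max_low shrink the lowmem hole */
			if (start < max_low) {
				unsigned long low_end = end < max_low ? end : max_low;
				zhole_size[0] -= low_end - start;
			}
			/* pages above max_low shrink the highmem hole */
			if (end > max_low) {
				unsigned long high_start = start > max_low ? start : max_low;
				zhole_size[1] -= end - high_start;
			}
		}

		printf("lowmem:  span %lu pages, holes %lu pages\n",
		       zone_size[0], zhole_size[0]);
		printf("highmem: span %lu pages, holes %lu pages\n",
		       zone_size[1], zhole_size[1]);
		return 0;
	}

With these values the lowmem hole comes out to 0x8000 pages (the 0x68000-0x70000 gap) and the highmem hole to zero. A region that straddles max_low splits its pages between the two zones via the low_end/high_start clamps, which is exactly what the min()/max() expressions do in the patch's for_each_memblock() loop.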
