Diffstat (limited to 'arch/arm/mm/init.c')
-rw-r--r--	arch/arm/mm/init.c |  758
1 file changed, 452 insertions(+), 306 deletions(-)
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index ec00f26bffa..659c75d808d 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -13,72 +13,117 @@
 #include <linux/init.h>
 #include <linux/bootmem.h>
 #include <linux/mman.h>
+#include <linux/export.h>
 #include <linux/nodemask.h>
 #include <linux/initrd.h>
-
+#include <linux/of_fdt.h>
+#include <linux/highmem.h>
+#include <linux/gfp.h>
+#include <linux/memblock.h>
+#include <linux/dma-contiguous.h>
+#include <linux/sizes.h>
+
+#include <asm/cp15.h>
 #include <asm/mach-types.h>
+#include <asm/memblock.h>
+#include <asm/prom.h>
+#include <asm/sections.h>
 #include <asm/setup.h>
-#include <asm/sizes.h>
 #include <asm/tlb.h>
+#include <asm/fixmap.h>
 
 #include <asm/mach/arch.h>
 #include <asm/mach/map.h>
 
 #include "mm.h"
 
-extern void _text, _etext, __data_start, _end, __init_begin, __init_end;
-extern unsigned long phys_initrd_start;
-extern unsigned long phys_initrd_size;
+#ifdef CONFIG_CPU_CP15_MMU
+unsigned long __init __clear_cr(unsigned long mask)
+{
+	cr_alignment = cr_alignment & ~mask;
+	return cr_alignment;
+}
+#endif
 
-/*
- * This is used to pass memory configuration data from paging_init
- * to mem_init, and by show_mem() to skip holes in the memory map.
- */
-static struct meminfo meminfo = { 0, };
+static phys_addr_t phys_initrd_start __initdata = 0;
+static unsigned long phys_initrd_size __initdata = 0;
+
+static int __init early_initrd(char *p)
+{
+	phys_addr_t start;
+	unsigned long size;
+	char *endp;
+
+	start = memparse(p, &endp);
+	if (*endp == ',') {
+		size = memparse(endp + 1, NULL);
+
+		phys_initrd_start = start;
+		phys_initrd_size = size;
+	}
+	return 0;
+}
+early_param("initrd", early_initrd);
+
+static int __init parse_tag_initrd(const struct tag *tag)
+{
+	printk(KERN_WARNING "ATAG_INITRD is deprecated; "
+		"please update your bootloader.\n");
+	phys_initrd_start = __virt_to_phys(tag->u.initrd.start);
+	phys_initrd_size = tag->u.initrd.size;
+	return 0;
+}
 
-#define for_each_nodebank(iter,mi,no)			\
-	for (iter = 0; iter < mi->nr_banks; iter++)	\
-		if (mi->bank[iter].node == no)
+__tagtable(ATAG_INITRD, parse_tag_initrd);
 
-void show_mem(void)
+static int __init parse_tag_initrd2(const struct tag *tag)
+{
+	phys_initrd_start = tag->u.initrd.start;
+	phys_initrd_size = tag->u.initrd.size;
+	return 0;
+}
+
+__tagtable(ATAG_INITRD2, parse_tag_initrd2);
+
+/*
+ * This keeps memory configuration data used by a couple memory
+ * initialization functions, as well as show_mem() for the skipping
+ * of holes in the memory map. It is populated by arm_add_memory().
+ */
+void show_mem(unsigned int filter)
 {
 	int free = 0, total = 0, reserved = 0;
-	int shared = 0, cached = 0, slab = 0, node, i;
-	struct meminfo * mi = &meminfo;
+	int shared = 0, cached = 0, slab = 0;
+	struct memblock_region *reg;
 
 	printk("Mem-info:\n");
-	show_free_areas();
-	printk("Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
-
-	for_each_online_node(node) {
-		pg_data_t *n = NODE_DATA(node);
-		struct page *map = n->node_mem_map - n->node_start_pfn;
-
-		for_each_nodebank (i,mi,node) {
-			unsigned int pfn1, pfn2;
-			struct page *page, *end;
-
-			pfn1 = __phys_to_pfn(mi->bank[i].start);
-			pfn2 = __phys_to_pfn(mi->bank[i].size + mi->bank[i].start);
-
-			page = map + pfn1;
-			end = map + pfn2;
-
-			do {
-				total++;
-				if (PageReserved(page))
-					reserved++;
-				else if (PageSwapCache(page))
-					cached++;
-				else if (PageSlab(page))
-					slab++;
-				else if (!page_count(page))
-					free++;
-				else
-					shared += page_count(page) - 1;
-				page++;
-			} while (page < end);
-		}
+	show_free_areas(filter);
+
+	for_each_memblock(memory, reg) {
+		unsigned int pfn1, pfn2;
+		struct page *page, *end;
+
+		pfn1 = memblock_region_memory_base_pfn(reg);
+		pfn2 = memblock_region_memory_end_pfn(reg);
+
+		page = pfn_to_page(pfn1);
+		end = pfn_to_page(pfn2 - 1) + 1;
+
+		do {
+			total++;
+			if (PageReserved(page))
+				reserved++;
+			else if (PageSwapCache(page))
+				cached++;
+			else if (PageSlab(page))
+				slab++;
+			else if (!page_count(page))
+				free++;
+			else
+				shared += page_count(page) - 1;
+			pfn1++;
+			page = pfn_to_page(pfn1);
+		} while (pfn1 < pfn2);
 	}
 
 	printk("%d pages of RAM\n", total);
@@ -89,282 +134,259 @@ void show_mem(void)
 	printk("%d pages swap cached\n", cached);
 }
 
-/*
- * FIXME: We really want to avoid allocating the bootmap bitmap
- * over the top of the initrd.  Hopefully, this is located towards
- * the start of a bank, so if we allocate the bootmap bitmap at
- * the end, we won't clash.
- */
-static unsigned int __init
-find_bootmap_pfn(int node, struct meminfo *mi, unsigned int bootmap_pages)
+static void __init find_limits(unsigned long *min, unsigned long *max_low,
+			       unsigned long *max_high)
 {
-	unsigned int start_pfn, bank, bootmap_pfn;
-
-	start_pfn = PAGE_ALIGN(__pa(&_end)) >> PAGE_SHIFT;
-	bootmap_pfn = 0;
-
-	for_each_nodebank(bank, mi, node) {
-		unsigned int start, end;
+	*max_low = PFN_DOWN(memblock_get_current_limit());
+	*min = PFN_UP(memblock_start_of_DRAM());
+	*max_high = PFN_DOWN(memblock_end_of_DRAM());
+}
 
-		start = mi->bank[bank].start >> PAGE_SHIFT;
-		end = (mi->bank[bank].size +
-		       mi->bank[bank].start) >> PAGE_SHIFT;
+#ifdef CONFIG_ZONE_DMA
 
-		if (end < start_pfn)
-			continue;
+phys_addr_t arm_dma_zone_size __read_mostly;
+EXPORT_SYMBOL(arm_dma_zone_size);
 
-		if (start < start_pfn)
-			start = start_pfn;
-
-		if (end <= start)
-			continue;
+/*
+ * The DMA mask corresponding to the maximum bus address allocatable
+ * using GFP_DMA. The default here places no restriction on DMA
+ * allocations. This must be the smallest DMA mask in the system,
+ * so a successful GFP_DMA allocation will always satisfy this.
+ */
+phys_addr_t arm_dma_limit;
+unsigned long arm_dma_pfn_limit;
 
-		if (end - start >= bootmap_pages) {
-			bootmap_pfn = start;
-			break;
-		}
-	}
+static void __init arm_adjust_dma_zone(unsigned long *size, unsigned long *hole,
+	unsigned long dma_size)
+{
+	if (size[0] <= dma_size)
+		return;
 
-	if (bootmap_pfn == 0)
-		BUG();
+	size[ZONE_NORMAL] = size[0] - dma_size;
+	size[ZONE_DMA] = dma_size;
+	hole[ZONE_NORMAL] = hole[0];
+	hole[ZONE_DMA] = 0;
+}
+#endif
 
-	return bootmap_pfn;
+void __init setup_dma_zone(const struct machine_desc *mdesc)
+{
+#ifdef CONFIG_ZONE_DMA
+	if (mdesc->dma_zone_size) {
+		arm_dma_zone_size = mdesc->dma_zone_size;
+		arm_dma_limit = PHYS_OFFSET + arm_dma_zone_size - 1;
+	} else
+		arm_dma_limit = 0xffffffff;
+	arm_dma_pfn_limit = arm_dma_limit >> PAGE_SHIFT;
+#endif
 }
 
-static int __init check_initrd(struct meminfo *mi)
+static void __init zone_sizes_init(unsigned long min, unsigned long max_low,
+	unsigned long max_high)
 {
-	int initrd_node = -2;
-#ifdef CONFIG_BLK_DEV_INITRD
-	unsigned long end = phys_initrd_start + phys_initrd_size;
+	unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES];
+	struct memblock_region *reg;
 
 	/*
-	 * Make sure that the initrd is within a valid area of
-	 * memory.
+	 * initialise the zones.
 	 */
-	if (phys_initrd_size) {
-		unsigned int i;
-
-		initrd_node = -1;
-
-		for (i = 0; i < mi->nr_banks; i++) {
-			unsigned long bank_end;
+	memset(zone_size, 0, sizeof(zone_size));
 
-			bank_end = mi->bank[i].start + mi->bank[i].size;
+	/*
+	 * The memory size has already been determined.  If we need
+	 * to do anything fancy with the allocation of this memory
+	 * to the zones, now is the time to do it.
+	 */
+	zone_size[0] = max_low - min;
+#ifdef CONFIG_HIGHMEM
+	zone_size[ZONE_HIGHMEM] = max_high - max_low;
+#endif
 
-			if (mi->bank[i].start <= phys_initrd_start &&
-			    end <= bank_end)
-				initrd_node = mi->bank[i].node;
+	/*
+	 * Calculate the size of the holes.
+	 *  holes = node_size - sum(bank_sizes)
+	 */
+	memcpy(zhole_size, zone_size, sizeof(zhole_size));
+	for_each_memblock(memory, reg) {
+		unsigned long start = memblock_region_memory_base_pfn(reg);
+		unsigned long end = memblock_region_memory_end_pfn(reg);
+
+		if (start < max_low) {
+			unsigned long low_end = min(end, max_low);
+			zhole_size[0] -= low_end - start;
 		}
+#ifdef CONFIG_HIGHMEM
+		if (end > max_low) {
+			unsigned long high_start = max(start, max_low);
+			zhole_size[ZONE_HIGHMEM] -= end - high_start;
+		}
+#endif
 	}
 
-	if (initrd_node == -1) {
-		printk(KERN_ERR "initrd (0x%08lx - 0x%08lx) extends beyond "
-		       "physical memory - disabling initrd\n",
-		       phys_initrd_start, end);
-		phys_initrd_start = phys_initrd_size = 0;
-	}
+#ifdef CONFIG_ZONE_DMA
+	/*
+	 * Adjust the sizes according to any special requirements for
+	 * this machine type.
+	 */
+	if (arm_dma_zone_size)
+		arm_adjust_dma_zone(zone_size, zhole_size,
+			arm_dma_zone_size >> PAGE_SHIFT);
 #endif
 
-	return initrd_node;
+	free_area_init_node(0, zone_size, min, zhole_size);
 }
 
-static inline void map_memory_bank(struct membank *bank)
+#ifdef CONFIG_HAVE_ARCH_PFN_VALID
+int pfn_valid(unsigned long pfn)
 {
-#ifdef CONFIG_MMU
-	struct map_desc map;
-
-	map.pfn = __phys_to_pfn(bank->start);
-	map.virtual = __phys_to_virt(bank->start);
-	map.length = bank->size;
-	map.type = MT_MEMORY;
-
-	create_mapping(&map);
-#endif
+	return memblock_is_memory(__pfn_to_phys(pfn));
 }
+EXPORT_SYMBOL(pfn_valid);
+#endif
 
-static unsigned long __init
-bootmem_init_node(int node, int initrd_node, struct meminfo *mi)
+#ifndef CONFIG_SPARSEMEM
+static void __init arm_memory_present(void)
 {
-	unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES];
-	unsigned long start_pfn, end_pfn, boot_pfn;
-	unsigned int boot_pages;
-	pg_data_t *pgdat;
-	int i;
-
-	start_pfn = -1UL;
-	end_pfn = 0;
-
-	/*
-	 * Calculate the pfn range, and map the memory banks for this node.
-	 */
-	for_each_nodebank(i, mi, node) {
-		struct membank *bank = &mi->bank[i];
-		unsigned long start, end;
-
-		start = bank->start >> PAGE_SHIFT;
-		end = (bank->start + bank->size) >> PAGE_SHIFT;
-
-		if (start_pfn > start)
-			start_pfn = start;
-		if (end_pfn < end)
-			end_pfn = end;
+}
+#else
+static void __init arm_memory_present(void)
+{
+	struct memblock_region *reg;
 
-		map_memory_bank(bank);
-	}
+	for_each_memblock(memory, reg)
+		memory_present(0, memblock_region_memory_base_pfn(reg),
+			       memblock_region_memory_end_pfn(reg));
+}
+#endif
 
-	/*
-	 * If there is no memory in this node, ignore it.
-	 */
-	if (end_pfn == 0)
-		return end_pfn;
+static bool arm_memblock_steal_permitted = true;
 
-	/*
-	 * Allocate the bootmem bitmap page.
-	 */
-	boot_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
-	boot_pfn = find_bootmap_pfn(node, mi, boot_pages);
+phys_addr_t __init arm_memblock_steal(phys_addr_t size, phys_addr_t align)
+{
+	phys_addr_t phys;
 
-	/*
-	 * Initialise the bootmem allocator for this node, handing the
-	 * memory banks over to bootmem.
-	 */
-	node_set_online(node);
-	pgdat = NODE_DATA(node);
-	init_bootmem_node(pgdat, boot_pfn, start_pfn, end_pfn);
+	BUG_ON(!arm_memblock_steal_permitted);
 
-	for_each_nodebank(i, mi, node)
-		free_bootmem_node(pgdat, mi->bank[i].start, mi->bank[i].size);
+	phys = memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ANYWHERE);
+	memblock_free(phys, size);
+	memblock_remove(phys, size);
 
-	/*
-	 * Reserve the bootmem bitmap for this node.
-	 */
-	reserve_bootmem_node(pgdat, boot_pfn << PAGE_SHIFT,
-			     boot_pages << PAGE_SHIFT, BOOTMEM_DEFAULT);
+	return phys;
+}
 
+void __init arm_memblock_init(const struct machine_desc *mdesc)
+{
+	/* Register the kernel text, kernel data and initrd with memblock. */
+#ifdef CONFIG_XIP_KERNEL
+	memblock_reserve(__pa(_sdata), _end - _sdata);
+#else
+	memblock_reserve(__pa(_stext), _end - _stext);
+#endif
 #ifdef CONFIG_BLK_DEV_INITRD
-	/*
-	 * If the initrd is in this node, reserve its memory.
-	 */
-	if (node == initrd_node) {
-		reserve_bootmem_node(pgdat, phys_initrd_start,
-				     phys_initrd_size, BOOTMEM_DEFAULT);
+	/* FDT scan will populate initrd_start */
+	if (initrd_start && !phys_initrd_size) {
+		phys_initrd_start = __virt_to_phys(initrd_start);
+		phys_initrd_size = initrd_end - initrd_start;
+	}
+	initrd_start = initrd_end = 0;
+	if (phys_initrd_size &&
+	    !memblock_is_region_memory(phys_initrd_start, phys_initrd_size)) {
+		pr_err("INITRD: 0x%08llx+0x%08lx is not a memory region - disabling initrd\n",
+		       (u64)phys_initrd_start, phys_initrd_size);
+		phys_initrd_start = phys_initrd_size = 0;
+	}
+	if (phys_initrd_size &&
+	    memblock_is_region_reserved(phys_initrd_start, phys_initrd_size)) {
+		pr_err("INITRD: 0x%08llx+0x%08lx overlaps in-use memory region - disabling initrd\n",
+		       (u64)phys_initrd_start, phys_initrd_size);
+		phys_initrd_start = phys_initrd_size = 0;
+	}
+	if (phys_initrd_size) {
+		memblock_reserve(phys_initrd_start, phys_initrd_size);
+
+		/* Now convert initrd to virtual addresses */
 		initrd_start = __phys_to_virt(phys_initrd_start);
 		initrd_end = initrd_start + phys_initrd_size;
 	}
 #endif
 
-	/*
-	 * Finally, reserve any node zero regions.
-	 */
-	if (node == 0)
-		reserve_node_zero(pgdat);
-
-	/*
-	 * initialise the zones within this node.
-	 */
-	memset(zone_size, 0, sizeof(zone_size));
-	memset(zhole_size, 0, sizeof(zhole_size));
+	arm_mm_memblock_reserve();
 
-	/*
-	 * The size of this node has already been determined.  If we need
-	 * to do anything fancy with the allocation of this memory to the
-	 * zones, now is the time to do it.
-	 */
-	zone_size[0] = end_pfn - start_pfn;
+	/* reserve any platform specific memblock areas */
+	if (mdesc->reserve)
+		mdesc->reserve();
 
-	/*
-	 * For each bank in this node, calculate the size of the holes.
-	 *  holes = node_size - sum(bank_sizes_in_node)
-	 */
-	zhole_size[0] = zone_size[0];
-	for_each_nodebank(i, mi, node)
-		zhole_size[0] -= mi->bank[i].size >> PAGE_SHIFT;
+	early_init_fdt_scan_reserved_mem();
 
 	/*
-	 * Adjust the sizes according to any special requirements for
-	 * this machine type.
+	 * reserve memory for DMA contigouos allocations,
+	 * must come from DMA area inside low memory
 	 */
-	arch_adjust_zones(node, zone_size, zhole_size);
-
-	free_area_init_node(node, pgdat, zone_size, start_pfn, zhole_size);
+	dma_contiguous_reserve(min(arm_dma_limit, arm_lowmem_limit));
 
-	return end_pfn;
+	arm_memblock_steal_permitted = false;
+	memblock_dump_all();
 }
 
-void __init bootmem_init(struct meminfo *mi)
+void __init bootmem_init(void)
 {
-	unsigned long memend_pfn = 0;
-	int node, initrd_node, i;
+	unsigned long min, max_low, max_high;
 
-	/*
-	 * Invalidate the node number for empty or invalid memory banks
-	 */
-	for (i = 0; i < mi->nr_banks; i++)
-		if (mi->bank[i].size == 0 || mi->bank[i].node >= MAX_NUMNODES)
-			mi->bank[i].node = -1;
+	memblock_allow_resize();
+	max_low = max_high = 0;
 
-	memcpy(&meminfo, mi, sizeof(meminfo));
+	find_limits(&min, &max_low, &max_high);
 
 	/*
-	 * Locate which node contains the ramdisk image, if any.
+	 * Sparsemem tries to allocate bootmem in memory_present(),
+	 * so must be done after the fixed reservations
 	 */
-	initrd_node = check_initrd(mi);
+	arm_memory_present();
 
 	/*
-	 * Run through each node initialising the bootmem allocator.
+	 * sparse_init() needs the bootmem allocator up and running.
 	 */
-	for_each_node(node) {
-		unsigned long end_pfn;
-
-		end_pfn = bootmem_init_node(node, initrd_node, mi);
+	sparse_init();
 
-		/*
-		 * Remember the highest memory PFN.
-		 */
-		if (end_pfn > memend_pfn)
-			memend_pfn = end_pfn;
-	}
-
-	high_memory = __va(memend_pfn << PAGE_SHIFT);
+	/*
+	 * Now free the memory - free_area_init_node needs
+	 * the sparse mem_map arrays initialized by sparse_init()
+	 * for memmap_init_zone(), otherwise all PFNs are invalid.
+	 */
+	zone_sizes_init(min, max_low, max_high);
 
 	/*
 	 * This doesn't seem to be used by the Linux memory manager any
 	 * more, but is used by ll_rw_block.  If we can get rid of it, we
 	 * also get rid of some of the stuff above as well.
-	 *
-	 * Note: max_low_pfn and max_pfn reflect the number of _pages_ in
-	 * the system, not the maximum PFN.
 	 */
-	max_pfn = max_low_pfn = memend_pfn - PHYS_PFN_OFFSET;
+	min_low_pfn = min;
+	max_low_pfn = max_low;
+	max_pfn = max_high;
 }
 
-static inline void free_area(unsigned long addr, unsigned long end, char *s)
+/*
+ * Poison init memory with an undefined instruction (ARM) or a branch to an
+ * undefined instruction (Thumb).
+ */
+static inline void poison_init_mem(void *s, size_t count)
 {
-	unsigned int size = (end - addr) >> 10;
-
-	for (; addr < end; addr += PAGE_SIZE) {
-		struct page *page = virt_to_page(addr);
-		ClearPageReserved(page);
-		init_page_count(page);
-		free_page(addr);
-		totalram_pages++;
-	}
-
-	if (size && s)
-		printk(KERN_INFO "Freeing %s memory: %dK\n", s, size);
+	u32 *p = (u32 *)s;
+	for (; count != 0; count -= 4)
+		*p++ = 0xe7fddef0;
 }
 
 static inline void
-free_memmap(int node, unsigned long start_pfn, unsigned long end_pfn)
+free_memmap(unsigned long start_pfn, unsigned long end_pfn)
 {
 	struct page *start_pg, *end_pg;
-	unsigned long pg, pgend;
+	phys_addr_t pg, pgend;
 
 	/*
 	 * Convert start_pfn/end_pfn to a struct page pointer.
 	 */
-	start_pg = pfn_to_page(start_pfn);
-	end_pg = pfn_to_page(end_pfn);
+	start_pg = pfn_to_page(start_pfn - 1) + 1;
+	end_pg = pfn_to_page(end_pfn - 1) + 1;
 
 	/*
 	 * Convert to physical addresses, and
@@ -378,40 +400,116 @@ free_memmap(int node, unsigned long start_pfn, unsigned long end_pfn)
 	 * free the section of the memmap array.
 	 */
 	if (pg < pgend)
-		free_bootmem_node(NODE_DATA(node), pg, pgend - pg);
+		memblock_free_early(pg, pgend - pg);
 }
 
 /*
  * The mem_map array can get very big.  Free the unused area of the memory map.
  */
-static void __init free_unused_memmap_node(int node, struct meminfo *mi)
+static void __init free_unused_memmap(void)
 {
-	unsigned long bank_start, prev_bank_end = 0;
-	unsigned int i;
+	unsigned long start, prev_end = 0;
+	struct memblock_region *reg;
 
 	/*
-	 * [FIXME] This relies on each bank being in address order.  This
-	 * may not be the case, especially if the user has provided the
-	 * information on the command line.
+	 * This relies on each bank being in address order.
+	 * The banks are sorted previously in bootmem_init().
 	 */
-	for_each_nodebank(i, mi, node) {
-		bank_start = mi->bank[i].start >> PAGE_SHIFT;
-		if (bank_start < prev_bank_end) {
-			printk(KERN_ERR "MEM: unordered memory banks.  "
-				"Not freeing memmap.\n");
-			break;
-		}
+	for_each_memblock(memory, reg) {
+		start = memblock_region_memory_base_pfn(reg);
+#ifdef CONFIG_SPARSEMEM
+		/*
+		 * Take care not to free memmap entries that don't exist
+		 * due to SPARSEMEM sections which aren't present.
+		 */
+		start = min(start,
+			    ALIGN(prev_end, PAGES_PER_SECTION));
+#else
+		/*
+		 * Align down here since the VM subsystem insists that the
+		 * memmap entries are valid from the bank start aligned to
+		 * MAX_ORDER_NR_PAGES.
+		 */
+		start = round_down(start, MAX_ORDER_NR_PAGES);
+#endif
 		/*
 		 * If we had a previous bank, and there is a space
 		 * between the current bank and the previous, free it.
 		 */
-		if (prev_bank_end && prev_bank_end != bank_start)
-			free_memmap(node, prev_bank_end, bank_start);
+		if (prev_end && prev_end < start)
+			free_memmap(prev_end, start);
+
+		/*
+		 * Align up here since the VM subsystem insists that the
+		 * memmap entries are valid from the bank end aligned to
+		 * MAX_ORDER_NR_PAGES.
+		 */
+		prev_end = ALIGN(memblock_region_memory_end_pfn(reg),
+				 MAX_ORDER_NR_PAGES);
+	}
+
+#ifdef CONFIG_SPARSEMEM
+	if (!IS_ALIGNED(prev_end, PAGES_PER_SECTION))
+		free_memmap(prev_end,
+			    ALIGN(prev_end, PAGES_PER_SECTION));
+#endif
+}
+
+#ifdef CONFIG_HIGHMEM
+static inline void free_area_high(unsigned long pfn, unsigned long end)
+{
+	for (; pfn < end; pfn++)
+		free_highmem_page(pfn_to_page(pfn));
+}
+#endif
+
+static void __init free_highpages(void)
+{
+#ifdef CONFIG_HIGHMEM
+	unsigned long max_low = max_low_pfn;
+	struct memblock_region *mem, *res;
 
-		prev_bank_end = (mi->bank[i].start +
-				 mi->bank[i].size) >> PAGE_SHIFT;
+	/* set highmem page free */
+	for_each_memblock(memory, mem) {
+		unsigned long start = memblock_region_memory_base_pfn(mem);
+		unsigned long end = memblock_region_memory_end_pfn(mem);
+
+		/* Ignore complete lowmem entries */
+		if (end <= max_low)
+			continue;
+
+		/* Truncate partial highmem entries */
+		if (start < max_low)
+			start = max_low;
+
+		/* Find and exclude any reserved regions */
+		for_each_memblock(reserved, res) {
+			unsigned long res_start, res_end;
+
+			res_start = memblock_region_reserved_base_pfn(res);
+			res_end = memblock_region_reserved_end_pfn(res);
+
+			if (res_end < start)
+				continue;
+			if (res_start < start)
+				res_start = start;
+			if (res_start > end)
+				res_start = end;
+			if (res_end > end)
+				res_end = end;
+			if (res_start != start)
+				free_area_high(start, res_start);
+			start = res_end;
+			if (start == end)
+				break;
+		}
+
+		/* And now free anything which remains */
+		if (start < end)
+			free_area_high(start, end);
 	}
+#endif
 }
 
 /*
@@ -421,51 +519,92 @@ static void __init free_unused_memmap_node(int node, struct meminfo *mi)
 */
 void __init mem_init(void)
 {
-	unsigned int codepages, datapages, initpages;
-	int i, node;
+#ifdef CONFIG_HAVE_TCM
+	/* These pointers are filled in on TCM detection */
+	extern u32 dtcm_end;
+	extern u32 itcm_end;
+#endif
+
+	set_max_mapnr(pfn_to_page(max_pfn) - mem_map);
 
-	codepages = &_etext - &_text;
-	datapages = &_end - &__data_start;
-	initpages = &__init_end - &__init_begin;
+	/* this will put all unused low memory onto the freelists */
+	free_unused_memmap();
+	free_all_bootmem();
 
-#ifndef CONFIG_DISCONTIGMEM
-	max_mapnr = virt_to_page(high_memory) - mem_map;
+#ifdef CONFIG_SA1111
+	/* now that our DMA memory is actually so designated, we can free it */
+	free_reserved_area(__va(PHYS_OFFSET), swapper_pg_dir, -1, NULL);
 #endif
 
-	/* this will put all unused low memory onto the freelists */
-	for_each_online_node(node) {
-		pg_data_t *pgdat = NODE_DATA(node);
+	free_highpages();
 
-		free_unused_memmap_node(node, &meminfo);
+	mem_init_print_info(NULL);
 
-		if (pgdat->node_spanned_pages != 0)
-			totalram_pages += free_all_bootmem_node(pgdat);
-	}
+#define MLK(b, t) b, t, ((t) - (b)) >> 10
+#define MLM(b, t) b, t, ((t) - (b)) >> 20
+#define MLK_ROUNDUP(b, t) b, t, DIV_ROUND_UP(((t) - (b)), SZ_1K)
 
-#ifdef CONFIG_SA1111
-	/* now that our DMA memory is actually so designated, we can free it */
-	free_area(PAGE_OFFSET, (unsigned long)swapper_pg_dir, NULL);
+	printk(KERN_NOTICE "Virtual kernel memory layout:\n"
+			"    vector  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
+#ifdef CONFIG_HAVE_TCM
+			"    DTCM    : 0x%08lx - 0x%08lx   (%4ld kB)\n"
+			"    ITCM    : 0x%08lx - 0x%08lx   (%4ld kB)\n"
+#endif
+			"    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
+			"    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
+			"    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB)\n"
+#ifdef CONFIG_HIGHMEM
+			"    pkmap   : 0x%08lx - 0x%08lx   (%4ld MB)\n"
+#endif
+#ifdef CONFIG_MODULES
+			"    modules : 0x%08lx - 0x%08lx   (%4ld MB)\n"
 #endif
+			"      .text : 0x%p" " - 0x%p" "   (%4d kB)\n"
+			"      .init : 0x%p" " - 0x%p" "   (%4d kB)\n"
+			"      .data : 0x%p" " - 0x%p" "   (%4d kB)\n"
+			"       .bss : 0x%p" " - 0x%p" "   (%4d kB)\n",
+
+			MLK(UL(CONFIG_VECTORS_BASE), UL(CONFIG_VECTORS_BASE) +
+				(PAGE_SIZE)),
+#ifdef CONFIG_HAVE_TCM
+			MLK(DTCM_OFFSET, (unsigned long) dtcm_end),
+			MLK(ITCM_OFFSET, (unsigned long) itcm_end),
+#endif
+			MLK(FIXADDR_START, FIXADDR_TOP),
+			MLM(VMALLOC_START, VMALLOC_END),
+			MLM(PAGE_OFFSET, (unsigned long)high_memory),
+#ifdef CONFIG_HIGHMEM
+			MLM(PKMAP_BASE, (PKMAP_BASE) + (LAST_PKMAP) *
+				(PAGE_SIZE)),
+#endif
+#ifdef CONFIG_MODULES
+			MLM(MODULES_VADDR, MODULES_END),
+#endif
+
+			MLK_ROUNDUP(_text, _etext),
+			MLK_ROUNDUP(__init_begin, __init_end),
+			MLK_ROUNDUP(_sdata, _edata),
+			MLK_ROUNDUP(__bss_start, __bss_stop));
+
+#undef MLK
+#undef MLM
+#undef MLK_ROUNDUP
 
 	/*
-	 * Since our memory may not be contiguous, calculate the
-	 * real number of pages we have in this system
+	 * Check boundaries twice: Some fundamental inconsistencies can
+	 * be detected at build time already.
 	 */
-	printk(KERN_INFO "Memory:");
-
-	num_physpages = 0;
-	for (i = 0; i < meminfo.nr_banks; i++) {
-		num_physpages += meminfo.bank[i].size >> PAGE_SHIFT;
-		printk(" %ldMB", meminfo.bank[i].size >> 20);
-	}
+#ifdef CONFIG_MMU
+	BUILD_BUG_ON(TASK_SIZE				> MODULES_VADDR);
+	BUG_ON(TASK_SIZE				> MODULES_VADDR);
+#endif
 
-	printk(" = %luMB total\n", num_physpages >> (20 - PAGE_SHIFT));
-	printk(KERN_NOTICE "Memory: %luKB available (%dK code, "
-		"%dK data, %dK init)\n",
-		(unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
-		codepages >> 10, datapages >> 10, initpages >> 10);
+#ifdef CONFIG_HIGHMEM
+	BUILD_BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > PAGE_OFFSET);
+	BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE	> PAGE_OFFSET);
+#endif
 
-	if (PAGE_SIZE >= 16384 && num_physpages <= 128) {
+	if (PAGE_SIZE >= 16384 && get_num_physpages() <= 128) {
 		extern int sysctl_overcommit_memory;
 		/*
 		 * On a machine this small we won't get
@@ -478,11 +617,16 @@ void __init mem_init(void)
 
 void free_initmem(void)
 {
-	if (!machine_is_integrator() && !machine_is_cintegrator()) {
-		free_area((unsigned long)(&__init_begin),
-			  (unsigned long)(&__init_end),
-			  "init");
-	}
+#ifdef CONFIG_HAVE_TCM
+	extern char __tcm_start, __tcm_end;
+
+	poison_init_mem(&__tcm_start, &__tcm_end - &__tcm_start);
+	free_reserved_area(&__tcm_start, &__tcm_end, -1, "TCM link");
+#endif
+
+	poison_init_mem(__init_begin, __init_end - __init_begin);
+	if (!machine_is_integrator() && !machine_is_cintegrator())
+		free_initmem_default(-1);
 }
 
 #ifdef CONFIG_BLK_DEV_INITRD
@@ -491,8 +635,10 @@ static int keep_initrd;
 
 void free_initrd_mem(unsigned long start, unsigned long end)
 {
-	if (!keep_initrd)
-		free_area(start, end, "initrd");
+	if (!keep_initrd) {
+		poison_init_mem((void *)start, PAGE_ALIGN(end) - start);
+		free_reserved_area((void *)start, (void *)end, -1, "initrd");
+	}
 }
 
 static int __init keepinitrd_setup(char *__unused)
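The zhole_size accounting introduced in zone_sizes_init() above is easy to check by hand: each zone's hole count starts as the zone's full PFN span and every memblock region subtracts the pages it actually covers. The following standalone userspace sketch replays that arithmetic; the region list and PFN limits are made-up illustration values, not anything taken from the kernel:

/*
 * Sketch of the lowmem/highmem hole accounting in zone_sizes_init().
 * Regions are [base_pfn, end_pfn); min/max_low/max_high mimic the
 * values find_limits() would produce.  Mock data, illustration only.
 */
#include <stdio.h>

struct region { unsigned long base_pfn, end_pfn; };

#define MIN(a, b) ((a) < (b) ? (a) : (b))
#define MAX(a, b) ((a) > (b) ? (a) : (b))

int main(void)
{
	/* mock DRAM layout: two banks with a gap between them */
	struct region mem[] = { { 0x100, 0x400 }, { 0x600, 0x900 } };
	unsigned long min = 0x100, max_low = 0x500, max_high = 0x900;
	unsigned long low_size = max_low - min;		/* zone_size[0] */
	unsigned long high_size = max_high - max_low;	/* ZONE_HIGHMEM */
	unsigned long low_hole = low_size, high_hole = high_size;

	for (unsigned int i = 0; i < sizeof(mem) / sizeof(mem[0]); i++) {
		unsigned long start = mem[i].base_pfn, end = mem[i].end_pfn;

		if (start < max_low)	/* pages present in lowmem */
			low_hole -= MIN(end, max_low) - start;
		if (end > max_low)	/* pages present in highmem */
			high_hole -= end - MAX(start, max_low);
	}

	printf("lowmem:  %lu pages spanned, %lu in holes\n", low_size, low_hole);
	printf("highmem: %lu pages spanned, %lu in holes\n", high_size, high_hole);
	return 0;
}

With the mock layout above this reports 0x100 hole pages in each zone: the 0x400-0x500 tail of lowmem and the 0x500-0x600 head of highmem, matching the inter-bank gap.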
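Similarly, the early_initrd() parameter handler added above leans on the kernel's memparse() to accept "initrd=<start>,<size>" where either number may carry a K/M/G suffix. Below is a hedged userspace approximation; memparse_sketch() is a hypothetical, simplified stand-in for memparse() (the real one also handles T/P/E suffixes), shown only to make the ",<size>" split visible:

/* Userspace approximation of the early_initrd() "start,size" parsing. */
#include <stdio.h>
#include <stdlib.h>

static unsigned long long memparse_sketch(const char *p, char **endp)
{
	/* base 0 accepts decimal and 0x-prefixed hex, like the kernel */
	unsigned long long v = strtoull(p, endp, 0);

	switch (**endp) {
	case 'G': case 'g': v <<= 10;	/* fall through */
	case 'M': case 'm': v <<= 10;	/* fall through */
	case 'K': case 'k': v <<= 10; (*endp)++;
	default: break;
	}
	return v;
}

int main(void)
{
	const char *arg = "0x60800000,8M";	/* example initrd= value */
	char *endp;
	unsigned long long start, size = 0;

	start = memparse_sketch(arg, &endp);
	if (*endp == ',')	/* same "start,size" check as early_initrd() */
		size = memparse_sketch(endp + 1, NULL);

	printf("initrd at %#llx, size %llu bytes\n", start, size);
	return 0;
}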
