Diffstat (limited to 'arch/sh/mm/init.c')
-rw-r--r--	arch/sh/mm/init.c	656
1 file changed, 438 insertions(+), 218 deletions(-)
diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index 4e9c854845a..2d089fe2cba 100644
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -1,136 +1,104 @@
-/* $Id: init.c,v 1.19 2004/02/21 04:42:16 kkojima Exp $
- *
- * linux/arch/sh/mm/init.c
+/*
+ * linux/arch/sh/mm/init.c
  *
  * Copyright (C) 1999  Niibe Yutaka
- * Copyright (C) 2002, 2004  Paul Mundt
+ * Copyright (C) 2002 - 2011  Paul Mundt
  *
  * Based on linux/arch/i386/mm/init.c:
  *  Copyright (C) 1995  Linus Torvalds
  */
-
-#include <linux/config.h>
-#include <linux/signal.h>
-#include <linux/sched.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/string.h>
-#include <linux/types.h>
-#include <linux/ptrace.h>
-#include <linux/mman.h>
 #include <linux/mm.h>
 #include <linux/swap.h>
-#include <linux/smp.h>
 #include <linux/init.h>
-#include <linux/highmem.h>
+#include <linux/gfp.h>
 #include <linux/bootmem.h>
+#include <linux/proc_fs.h>
 #include <linux/pagemap.h>
-
-#include <asm/processor.h>
-#include <asm/system.h>
-#include <asm/uaccess.h>
-#include <asm/pgtable.h>
-#include <asm/pgalloc.h>
+#include <linux/percpu.h>
+#include <linux/io.h>
+#include <linux/memblock.h>
+#include <linux/dma-mapping.h>
+#include <linux/export.h>
 #include <asm/mmu_context.h>
-#include <asm/io.h>
+#include <asm/mmzone.h>
+#include <asm/kexec.h>
 #include <asm/tlb.h>
 #include <asm/cacheflush.h>
+#include <asm/sections.h>
+#include <asm/setup.h>
 #include <asm/cache.h>
+#include <asm/sizes.h>
 
-DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
 pgd_t swapper_pg_dir[PTRS_PER_PGD];
 
-/*
- * Cache of MMU context last used.
- */
-unsigned long mmu_context_cache = NO_CONTEXT;
-
-#ifdef CONFIG_MMU
-/* It'd be good if these lines were in the standard header file. */
-#define START_PFN	(NODE_DATA(0)->bdata->node_boot_start >> PAGE_SHIFT)
-#define MAX_LOW_PFN	(NODE_DATA(0)->bdata->node_low_pfn)
-#endif
-
-#ifdef CONFIG_DISCONTIGMEM
-pg_data_t discontig_page_data[MAX_NUMNODES];
-bootmem_data_t discontig_node_bdata[MAX_NUMNODES];
-#endif
-
-void (*copy_page)(void *from, void *to);
-void (*clear_page)(void *to);
+void __init generic_mem_init(void)
+{
+	memblock_add(__MEMORY_START, __MEMORY_SIZE);
+}
 
-void show_mem(void)
+void __init __weak plat_mem_setup(void)
 {
-	int i, total = 0, reserved = 0;
-	int shared = 0, cached = 0;
-
-	printk("Mem-info:\n");
-	show_free_areas();
-	printk("Free swap:       %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
-	i = max_mapnr;
-	while (i-- > 0) {
-		total++;
-		if (PageReserved(mem_map+i))
-			reserved++;
-		else if (PageSwapCache(mem_map+i))
-			cached++;
-		else if (page_count(mem_map+i))
-			shared += page_count(mem_map+i) - 1;
-	}
-	printk("%d pages of RAM\n",total);
-	printk("%d reserved pages\n",reserved);
-	printk("%d pages shared\n",shared);
-	printk("%d pages swap cached\n",cached);
+	/* Nothing to see here, move along. */
 }
 
-static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot)
+#ifdef CONFIG_MMU
+static pte_t *__get_pte_phys(unsigned long addr)
 {
 	pgd_t *pgd;
+	pud_t *pud;
 	pmd_t *pmd;
-	pte_t *pte;
 
-	pgd = swapper_pg_dir + pgd_index(addr);
+	pgd = pgd_offset_k(addr);
 	if (pgd_none(*pgd)) {
 		pgd_ERROR(*pgd);
-		return;
+		return NULL;
 	}
 
-	pmd = pmd_offset(pgd, addr);
-	if (pmd_none(*pmd)) {
-		pte = (pte_t *)get_zeroed_page(GFP_ATOMIC);
-		set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE | _PAGE_USER));
-		if (pte != pte_offset_kernel(pmd, 0)) {
-			pmd_ERROR(*pmd);
-			return;
-		}
+	pud = pud_alloc(NULL, pgd, addr);
+	if (unlikely(!pud)) {
+		pud_ERROR(*pud);
+		return NULL;
 	}
 
-	pte = pte_offset_kernel(pmd, addr);
+	pmd = pmd_alloc(NULL, pud, addr);
+	if (unlikely(!pmd)) {
+		pmd_ERROR(*pmd);
+		return NULL;
+	}
+
+	return pte_offset_kernel(pmd, addr);
+}
+
+static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot)
+{
+	pte_t *pte;
+
+	pte = __get_pte_phys(addr);
+
 	if (!pte_none(*pte)) {
 		pte_ERROR(*pte);
 		return;
 	}
 
 	set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, prot));
+	local_flush_tlb_one(get_asid(), addr);
 
-	__flush_tlb_page(get_asid(), addr);
+	if (pgprot_val(prot) & _PAGE_WIRED)
+		tlb_wire_entry(NULL, addr, *pte);
+}
+
+static void clear_pte_phys(unsigned long addr, pgprot_t prot)
+{
+	pte_t *pte;
+
+	pte = __get_pte_phys(addr);
+
+	if (pgprot_val(prot) & _PAGE_WIRED)
+		tlb_unwire_entry();
+
+	set_pte(pte, pfn_pte(0, __pgprot(0)));
+	local_flush_tlb_one(get_asid(), addr);
 }
 
-/*
- * As a performance optimization, other platforms preserve the fixmap mapping
- * across a context switch, we don't presently do this, but this could be done
- * in a similar fashion as to the wired TLB interface that sh64 uses (by way
- * of the memorry mapped UTLB configuration) -- this unfortunately forces us to
- * give up a TLB entry for each mapping we want to preserve. While this may be
- * viable for a small number of fixmaps, it's not particularly useful for
- * everything and needs to be carefully evaluated. (ie, we may want this for
- * the vsyscall page).
- *
- * XXX: Perhaps add a _PAGE_WIRED flag or something similar that we can pass
- * in at __set_fixmap() time to determine the appropriate behavior to follow.
- *
- *					 -- PFM.
- */
 void __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
 {
 	unsigned long address = __fix_to_virt(idx);
@@ -143,171 +111,423 @@ void __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
 	set_pte_phys(address, phys, prot);
 }
 
-/* References to section boundaries */
+void __clear_fixmap(enum fixed_addresses idx, pgprot_t prot)
+{
+	unsigned long address = __fix_to_virt(idx);
 
-extern char _text, _etext, _edata, __bss_start, _end;
-extern char __init_begin, __init_end;
+	if (idx >= __end_of_fixed_addresses) {
+		BUG();
+		return;
+	}
 
-/*
- * paging_init() sets up the page tables
- *
- * This routines also unmaps the page at virtual kernel address 0, so
- * that we can trap those pesky NULL-reference errors in the kernel.
- */
-void __init paging_init(void)
+	clear_pte_phys(address, prot);
+}
+
+static pmd_t * __init one_md_table_init(pud_t *pud)
 {
-	unsigned long zones_size[MAX_NR_ZONES] = { 0, };
+	if (pud_none(*pud)) {
+		pmd_t *pmd;
 
-	/*
-	 * Setup some defaults for the zone sizes.. these should be safe
-	 * regardless of distcontiguous memory or MMU settings.
-	 */
-	zones_size[ZONE_DMA] = 0 >> PAGE_SHIFT;
-	zones_size[ZONE_NORMAL] = __MEMORY_SIZE >> PAGE_SHIFT;
-#ifdef CONFIG_HIGHMEM
-	zones_size[ZONE_HIGHMEM] = 0 >> PAGE_SHIFT;
+		pmd = alloc_bootmem_pages(PAGE_SIZE);
+		pud_populate(&init_mm, pud, pmd);
+		BUG_ON(pmd != pmd_offset(pud, 0));
+	}
+
+	return pmd_offset(pud, 0);
+}
+
+static pte_t * __init one_page_table_init(pmd_t *pmd)
+{
+	if (pmd_none(*pmd)) {
+		pte_t *pte;
+
+		pte = alloc_bootmem_pages(PAGE_SIZE);
+		pmd_populate_kernel(&init_mm, pmd, pte);
+		BUG_ON(pte != pte_offset_kernel(pmd, 0));
+	}
+
+	return pte_offset_kernel(pmd, 0);
+}
+
+static pte_t * __init page_table_kmap_check(pte_t *pte, pmd_t *pmd,
+					    unsigned long vaddr, pte_t *lastpte)
+{
+	return pte;
+}
+
+void __init page_table_range_init(unsigned long start, unsigned long end,
+				  pgd_t *pgd_base)
+{
+	pgd_t *pgd;
+	pud_t *pud;
+	pmd_t *pmd;
+	pte_t *pte = NULL;
+	int i, j, k;
+	unsigned long vaddr;
+
+	vaddr = start;
+	i = __pgd_offset(vaddr);
+	j = __pud_offset(vaddr);
+	k = __pmd_offset(vaddr);
+	pgd = pgd_base + i;
+
+	for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) {
+		pud = (pud_t *)pgd;
+		for ( ; (j < PTRS_PER_PUD) && (vaddr != end); pud++, j++) {
+			pmd = one_md_table_init(pud);
+#ifndef __PAGETABLE_PMD_FOLDED
+			pmd += k;
 #endif
+			for (; (k < PTRS_PER_PMD) && (vaddr != end); pmd++, k++) {
+				pte = page_table_kmap_check(one_page_table_init(pmd),
+							    pmd, vaddr, pte);
+				vaddr += PMD_SIZE;
+			}
+			k = 0;
+		}
+		j = 0;
+	}
+}
+#endif	/* CONFIG_MMU */
+
+void __init allocate_pgdat(unsigned int nid)
+{
+	unsigned long start_pfn, end_pfn;
+#ifdef CONFIG_NEED_MULTIPLE_NODES
+	unsigned long phys;
+#endif
+
+	get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
+
+#ifdef CONFIG_NEED_MULTIPLE_NODES
+	phys = __memblock_alloc_base(sizeof(struct pglist_data),
+				     SMP_CACHE_BYTES, end_pfn << PAGE_SHIFT);
+	/* Retry with all of system memory */
+	if (!phys)
+		phys = __memblock_alloc_base(sizeof(struct pglist_data),
+					     SMP_CACHE_BYTES,
+					     memblock_end_of_DRAM());
+	if (!phys)
+		panic("Can't allocate pgdat for node %d\n", nid);
+
+	NODE_DATA(nid) = __va(phys);
+	memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));
+
+	NODE_DATA(nid)->bdata = &bootmem_node_data[nid];
+#endif
+
+	NODE_DATA(nid)->node_start_pfn = start_pfn;
+	NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;
+}
+
+static void __init bootmem_init_one_node(unsigned int nid)
+{
+	unsigned long total_pages, paddr;
+	unsigned long end_pfn;
+	struct pglist_data *p;
+
+	p = NODE_DATA(nid);
+
+	/* Nothing to do.. */
+	if (!p->node_spanned_pages)
+		return;
+
+	end_pfn = pgdat_end_pfn(p);
+
+	total_pages = bootmem_bootmap_pages(p->node_spanned_pages);
+
+	paddr = memblock_alloc(total_pages << PAGE_SHIFT, PAGE_SIZE);
+	if (!paddr)
+		panic("Can't allocate bootmap for nid[%d]\n", nid);
+
+	init_bootmem_node(p, paddr >> PAGE_SHIFT, p->node_start_pfn, end_pfn);
+
+	free_bootmem_with_active_regions(nid, end_pfn);
 
-#ifdef CONFIG_MMU
 	/*
-	 * If we have an MMU, and want to be using it .. we need to adjust
-	 * the zone sizes accordingly, in addition to turning it on.
+	 * XXX Handle initial reservations for the system memory node
+	 * only for the moment, we'll refactor this later for handling
+	 * reservations in other nodes.
 	 */
-	{
-		unsigned long max_dma, low, start_pfn;
-		pgd_t *pg_dir;
-		int i;
-
-		/* We don't need kernel mapping as hardware support that. */
-		pg_dir = swapper_pg_dir;
-
-		for (i = 0; i < PTRS_PER_PGD; i++)
-			pgd_val(pg_dir[i]) = 0;
-
-		/* Turn on the MMU */
-		enable_mmu();
-
-		/* Fixup the zone sizes */
-		start_pfn = START_PFN;
-		max_dma = virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT;
-		low = MAX_LOW_PFN;
-
-		if (low < max_dma) {
-			zones_size[ZONE_DMA] = low - start_pfn;
-			zones_size[ZONE_NORMAL] = 0;
-		} else {
-			zones_size[ZONE_DMA] = max_dma - start_pfn;
-			zones_size[ZONE_NORMAL] = low - max_dma;
+	if (nid == 0) {
+		struct memblock_region *reg;
+
+		/* Reserve the sections we're already using. */
+		for_each_memblock(reserved, reg) {
+			reserve_bootmem(reg->base, reg->size, BOOTMEM_DEFAULT);
 		}
 	}
 
-#elif defined(CONFIG_CPU_SH3) || defined(CONFIG_CPU_SH4)
+	sparse_memory_present_with_active_regions(nid);
+}
+
+static void __init do_init_bootmem(void)
+{
+	struct memblock_region *reg;
+	int i;
+
+	/* Add active regions with valid PFNs. */
+	for_each_memblock(memory, reg) {
+		unsigned long start_pfn, end_pfn;
+		start_pfn = memblock_region_memory_base_pfn(reg);
+		end_pfn = memblock_region_memory_end_pfn(reg);
+		__add_active_range(0, start_pfn, end_pfn);
+	}
+
+	/* All of system RAM sits in node 0 for the non-NUMA case */
+	allocate_pgdat(0);
+	node_set_online(0);
+
+	plat_mem_setup();
+
+	for_each_online_node(i)
+		bootmem_init_one_node(i);
+
+	sparse_init();
+}
+
+static void __init early_reserve_mem(void)
+{
+	unsigned long start_pfn;
+	u32 zero_base = (u32)__MEMORY_START + (u32)PHYSICAL_OFFSET;
+	u32 start = zero_base + (u32)CONFIG_ZERO_PAGE_OFFSET;
+
 	/*
-	 * If we don't have CONFIG_MMU set and the processor in question
-	 * still has an MMU, care needs to be taken to make sure it doesn't
-	 * stay on.. Since the boot loader could have potentially already
-	 * turned it on, and we clearly don't want it, we simply turn it off.
-	 *
-	 * We don't need to do anything special for the zone sizes, since the
-	 * default values that were already configured up above should be
-	 * satisfactory.
+	 * Partially used pages are not usable - thus
+	 * we are rounding upwards:
 	 */
-	disable_mmu();
-#endif
-	NODE_DATA(0)->node_mem_map = NULL;
-	free_area_init_node(0, NODE_DATA(0), zones_size, __MEMORY_START >> PAGE_SHIFT, 0);
+	start_pfn = PFN_UP(__pa(_end));
 
-#ifdef CONFIG_DISCONTIGMEM
 	/*
-	 * And for discontig, do some more fixups on the zone sizes..
+	 * Reserve the kernel text and Reserve the bootmem bitmap. We do
+	 * this in two steps (first step was init_bootmem()), because
+	 * this catches the (definitely buggy) case of us accidentally
+	 * initializing the bootmem allocator with an invalid RAM area.
 	 */
-	zones_size[ZONE_DMA] = __MEMORY_SIZE_2ND >> PAGE_SHIFT;
-	zones_size[ZONE_NORMAL] = 0;
-	free_area_init_node(1, NODE_DATA(1), zones_size, __MEMORY_START_2ND >> PAGE_SHIFT, 0);
-#endif
+	memblock_reserve(start, (PFN_PHYS(start_pfn) + PAGE_SIZE - 1) - start);
+
+	/*
+	 * Reserve physical pages below CONFIG_ZERO_PAGE_OFFSET.
+	 */
+	if (CONFIG_ZERO_PAGE_OFFSET != 0)
+		memblock_reserve(zero_base, CONFIG_ZERO_PAGE_OFFSET);
+
+	/*
+	 * Handle additional early reservations
+	 */
+	check_for_initrd();
+	reserve_crashkernel();
+}
+
+void __init paging_init(void)
+{
+	unsigned long max_zone_pfns[MAX_NR_ZONES];
+	unsigned long vaddr, end;
+	int nid;
+
+	sh_mv.mv_mem_init();
+
+	early_reserve_mem();
+
+	/*
+	 * Once the early reservations are out of the way, give the
+	 * platforms a chance to kick out some memory.
+	 */
+	if (sh_mv.mv_mem_reserve)
+		sh_mv.mv_mem_reserve();
+
+	memblock_enforce_memory_limit(memory_limit);
+	memblock_allow_resize();
+
+	memblock_dump_all();
+
+	/*
+	 * Determine low and high memory ranges:
+	 */
+	max_low_pfn = max_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
+	min_low_pfn = __MEMORY_START >> PAGE_SHIFT;
+
+	nodes_clear(node_online_map);
+
+	memory_start = (unsigned long)__va(__MEMORY_START);
+	memory_end = memory_start + (memory_limit ?: memblock_phys_mem_size());
+
+	uncached_init();
+	pmb_init();
+	do_init_bootmem();
+	ioremap_fixed_init();
+
+	/* We don't need to map the kernel through the TLB, as
+	 * it is permanatly mapped using P1. So clear the
+	 * entire pgd. */
+	memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));
+
+	/* Set an initial value for the MMU.TTB so we don't have to
+	 * check for a null value. */
+	set_TTB(swapper_pg_dir);
+
+	/*
+	 * Populate the relevant portions of swapper_pg_dir so that
+	 * we can use the fixmap entries without calling kmalloc.
+	 * pte's will be filled in by __set_fixmap().
+	 */
+	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
+	end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK;
+	page_table_range_init(vaddr, end, swapper_pg_dir);
+
+	kmap_coherent_init();
+
+	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
+
+	for_each_online_node(nid) {
+		pg_data_t *pgdat = NODE_DATA(nid);
+		unsigned long low, start_pfn;
+
+		start_pfn = pgdat->bdata->node_min_pfn;
+		low = pgdat->bdata->node_low_pfn;
+
+		if (max_zone_pfns[ZONE_NORMAL] < low)
+			max_zone_pfns[ZONE_NORMAL] = low;
+
+		printk("Node %u: start_pfn = 0x%lx, low = 0x%lx\n",
+		       nid, start_pfn, low);
+	}
+
+	free_area_init_nodes(max_zone_pfns);
 }
 
+/*
+ * Early initialization for any I/O MMUs we might have.
+ */
+static void __init iommu_init(void)
+{
+	no_iommu_init();
+}
+
+unsigned int mem_init_done = 0;
+
 void __init mem_init(void)
 {
-	extern unsigned long empty_zero_page[1024];
-	int codesize, reservedpages, datasize, initsize;
-	int tmp;
-	extern unsigned long memory_start;
+	pg_data_t *pgdat;
 
-#ifdef CONFIG_MMU
-	high_memory = (void *)__va(MAX_LOW_PFN * PAGE_SIZE);
-#else
-	extern unsigned long memory_end;
+	iommu_init();
 
-	high_memory = (void *)(memory_end & PAGE_MASK);
-#endif
+	high_memory = NULL;
+	for_each_online_pgdat(pgdat)
+		high_memory = max_t(void *, high_memory,
+				    __va(pgdat_end_pfn(pgdat) << PAGE_SHIFT));
+
+	free_all_bootmem();
 
-	max_mapnr = num_physpages = MAP_NR(high_memory) - MAP_NR(memory_start);
+	/* Set this up early, so we can take care of the zero page */
+	cpu_cache_init();
 
 	/* clear the zero-page */
 	memset(empty_zero_page, 0, PAGE_SIZE);
 	__flush_wback_region(empty_zero_page, PAGE_SIZE);
 
-	/*
-	 * Setup wrappers for copy/clear_page(), these will get overridden
-	 * later in the boot process if a better method is available.
-	 */
-	copy_page = copy_page_slow;
-	clear_page = clear_page_slow;
+	vsyscall_init();
+
+	mem_init_print_info(NULL);
+	pr_info("virtual kernel memory layout:\n"
+		"    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
+#ifdef CONFIG_HIGHMEM
+		"    pkmap   : 0x%08lx - 0x%08lx   (%4ld kB)\n"
+#endif
+		"    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
+		"    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB) (cached)\n"
+#ifdef CONFIG_UNCACHED_MAPPING
+		"            : 0x%08lx - 0x%08lx   (%4ld MB) (uncached)\n"
+#endif
+		"      .init : 0x%08lx - 0x%08lx   (%4ld kB)\n"
+		"      .data : 0x%08lx - 0x%08lx   (%4ld kB)\n"
+		"      .text : 0x%08lx - 0x%08lx   (%4ld kB)\n",
+		FIXADDR_START, FIXADDR_TOP,
+		(FIXADDR_TOP - FIXADDR_START) >> 10,
+
+#ifdef CONFIG_HIGHMEM
+		PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE,
+		(LAST_PKMAP*PAGE_SIZE) >> 10,
+#endif
+
+		(unsigned long)VMALLOC_START, VMALLOC_END,
+		(VMALLOC_END - VMALLOC_START) >> 20,
 
-	/* this will put all low memory onto the freelists */
-	totalram_pages += free_all_bootmem_node(NODE_DATA(0));
-#ifdef CONFIG_DISCONTIGMEM
-	totalram_pages += free_all_bootmem_node(NODE_DATA(1));
+		(unsigned long)memory_start, (unsigned long)high_memory,
+		((unsigned long)high_memory - (unsigned long)memory_start) >> 20,
+
+#ifdef CONFIG_UNCACHED_MAPPING
+		uncached_start, uncached_end, uncached_size >> 20,
 #endif
-	reservedpages = 0;
-	for (tmp = 0; tmp < num_physpages; tmp++)
-		/*
-		 * Only count reserved RAM pages
-		 */
-		if (PageReserved(mem_map+tmp))
-			reservedpages++;
-
-	codesize =  (unsigned long) &_etext - (unsigned long) &_text;
-	datasize =  (unsigned long) &_edata - (unsigned long) &_etext;
-	initsize =  (unsigned long) &__init_end - (unsigned long) &__init_begin;
-
-	printk("Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data, %dk init)\n",
-		(unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
-		max_mapnr << (PAGE_SHIFT-10),
-		codesize >> 10,
-		reservedpages << (PAGE_SHIFT-10),
-		datasize >> 10,
-		initsize >> 10);
-
-	p3_cache_init();
+
+		(unsigned long)&__init_begin, (unsigned long)&__init_end,
+		((unsigned long)&__init_end -
+		 (unsigned long)&__init_begin) >> 10,
+
+		(unsigned long)&_etext, (unsigned long)&_edata,
+		((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
+
+		(unsigned long)&_text, (unsigned long)&_etext,
+		((unsigned long)&_etext - (unsigned long)&_text) >> 10);
+
+	mem_init_done = 1;
 }
 
 void free_initmem(void)
 {
-	unsigned long addr;
-
-	addr = (unsigned long)(&__init_begin);
-	for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
-		ClearPageReserved(virt_to_page(addr));
-		set_page_count(virt_to_page(addr), 1);
-		free_page(addr);
-		totalram_pages++;
-	}
-	printk ("Freeing unused kernel memory: %dk freed\n", (&__init_end - &__init_begin) >> 10);
+	free_initmem_default(-1);
 }
 
 #ifdef CONFIG_BLK_DEV_INITRD
 void free_initrd_mem(unsigned long start, unsigned long end)
 {
-	unsigned long p;
-	for (p = start; p < end; p += PAGE_SIZE) {
-		ClearPageReserved(virt_to_page(p));
-		set_page_count(virt_to_page(p), 1);
-		free_page(p);
-		totalram_pages++;
-	}
-	printk ("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
+	free_reserved_area((void *)start, (void *)end, -1, "initrd");
 }
 #endif
+
+#ifdef CONFIG_MEMORY_HOTPLUG
+int arch_add_memory(int nid, u64 start, u64 size)
+{
+	pg_data_t *pgdat;
+	unsigned long start_pfn = start >> PAGE_SHIFT;
+	unsigned long nr_pages = size >> PAGE_SHIFT;
+	int ret;
+
+	pgdat = NODE_DATA(nid);
+
+	/* We only have ZONE_NORMAL, so this is easy.. */
+	ret = __add_pages(nid, pgdat->node_zones + ZONE_NORMAL,
+			  start_pfn, nr_pages);
+	if (unlikely(ret))
+		printk("%s: Failed, __add_pages() == %d\n", __func__, ret);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(arch_add_memory);
+
+#ifdef CONFIG_NUMA
+int memory_add_physaddr_to_nid(u64 addr)
+{
+	/* Node 0 for now.. */
+	return 0;
+}
+EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
+#endif
+
+#ifdef CONFIG_MEMORY_HOTREMOVE
+int arch_remove_memory(u64 start, u64 size)
+{
+	unsigned long start_pfn = start >> PAGE_SHIFT;
+	unsigned long nr_pages = size >> PAGE_SHIFT;
+	struct zone *zone;
+	int ret;
+
+	zone = page_zone(pfn_to_page(start_pfn));
+	ret = __remove_pages(zone, start_pfn, nr_pages);
+	if (unlikely(ret))
+		pr_warn("%s: Failed, __remove_pages() == %d\n", __func__,
+			ret);
+
+	return ret;
+}
+#endif
+#endif /* CONFIG_MEMORY_HOTPLUG */
