| author | Jens Axboe <jens.axboe@oracle.com> | 2009-12-03 13:49:39 +0100 |
|---|---|---|
| committer | Jens Axboe <jens.axboe@oracle.com> | 2009-12-03 13:49:39 +0100 |
| commit | 220d0b1dbf78c6417a658c96e571415552d3abac (patch) | |
| tree | 70cd3862540c38ea490e7a27c3c7acc35b680234 /mm | |
| parent | 474b18ccc264c472abeec50f48469b6477202699 (diff) | |
| parent | 22763c5cf3690a681551162c15d34d935308c8d7 (diff) | |
Merge branch 'master' into for-2.6.33
Diffstat (limited to 'mm')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | mm/Kconfig | 5 |
| -rw-r--r-- | mm/backing-dev.c | 8 |
| -rw-r--r-- | mm/highmem.c | 17 |
| -rw-r--r-- | mm/ksm.c | 1 |
| -rw-r--r-- | mm/memory_hotplug.c | 24 |
| -rw-r--r-- | mm/migrate.c | 2 |
| -rw-r--r-- | mm/page_alloc.c | 4 |
| -rw-r--r-- | mm/percpu.c | 121 |
| -rw-r--r-- | mm/swapfile.c | 3 |
9 files changed, 124 insertions, 61 deletions
```diff
diff --git a/mm/Kconfig b/mm/Kconfig
index fd3386242cf..44cf6f0a3a6 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -128,12 +128,9 @@ config SPARSEMEM_VMEMMAP
 config MEMORY_HOTPLUG
         bool "Allow for memory hot-add"
         depends on SPARSEMEM || X86_64_ACPI_NUMA
-        depends on HOTPLUG && !(HIBERNATION && !S390) && ARCH_ENABLE_MEMORY_HOTPLUG
+        depends on HOTPLUG && ARCH_ENABLE_MEMORY_HOTPLUG
         depends on (IA64 || X86 || PPC_BOOK3S_64 || SUPERH || S390)

-comment "Memory hotplug is currently incompatible with Software Suspend"
-        depends on SPARSEMEM && HOTPLUG && HIBERNATION && !S390
-
 config MEMORY_HOTPLUG_SPARSE
         def_bool y
         depends on SPARSEMEM && MEMORY_HOTPLUG
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index 11aee09dd2a..67a33a5a1a9 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -604,10 +604,14 @@ static void bdi_wb_shutdown(struct backing_dev_info *bdi)

         /*
          * Finally, kill the kernel threads. We don't need to be RCU
-         * safe anymore, since the bdi is gone from visibility.
+         * safe anymore, since the bdi is gone from visibility. Force
+         * unfreeze of the thread before calling kthread_stop(), otherwise
+         * it would never exet if it is currently stuck in the refrigerator.
          */
-        list_for_each_entry(wb, &bdi->wb_list, list)
+        list_for_each_entry(wb, &bdi->wb_list, list) {
+                wb->task->flags &= ~PF_FROZEN;
                 kthread_stop(wb->task);
+        }
 }

 /*
diff --git a/mm/highmem.c b/mm/highmem.c
index 25878cc49da..9c1e627f282 100644
--- a/mm/highmem.c
+++ b/mm/highmem.c
@@ -426,16 +426,21 @@ void __init page_address_init(void)

 void debug_kmap_atomic(enum km_type type)
 {
-        static unsigned warn_count = 10;
+        static int warn_count = 10;

-        if (unlikely(warn_count == 0))
+        if (unlikely(warn_count < 0))
                 return;

         if (unlikely(in_interrupt())) {
-                if (in_irq()) {
+                if (in_nmi()) {
+                        if (type != KM_NMI && type != KM_NMI_PTE) {
+                                WARN_ON(1);
+                                warn_count--;
+                        }
+                } else if (in_irq()) {
                         if (type != KM_IRQ0 && type != KM_IRQ1 &&
                             type != KM_BIO_SRC_IRQ && type != KM_BIO_DST_IRQ &&
-                            type != KM_BOUNCE_READ) {
+                            type != KM_BOUNCE_READ && type != KM_IRQ_PTE) {
                                 WARN_ON(1);
                                 warn_count--;
                         }
@@ -452,7 +457,9 @@ void debug_kmap_atomic(enum km_type type)
         }

         if (type == KM_IRQ0 || type == KM_IRQ1 || type == KM_BOUNCE_READ ||
-            type == KM_BIO_SRC_IRQ || type == KM_BIO_DST_IRQ) {
+            type == KM_BIO_SRC_IRQ || type == KM_BIO_DST_IRQ ||
+            type == KM_IRQ_PTE || type == KM_NMI ||
+            type == KM_NMI_PTE ) {
                 if (!irqs_disabled()) {
                         WARN_ON(1);
                         warn_count--;
diff --git a/mm/ksm.c b/mm/ksm.c
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -1012,6 +1012,7 @@ static struct rmap_item *unstable_tree_search_insert(struct page *page,
                 struct rmap_item *tree_rmap_item;
                 int ret;

+                cond_resched();
                 tree_rmap_item = rb_entry(*new, struct rmap_item, node);
                 page2[0] = get_mergeable_page(tree_rmap_item);
                 if (!page2[0])
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 821dee59637..2047465cd27 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -26,6 +26,7 @@
 #include <linux/migrate.h>
 #include <linux/page-isolation.h>
 #include <linux/pfn.h>
+#include <linux/suspend.h>

 #include <asm/tlbflush.h>

@@ -447,7 +448,8 @@ int online_pages(unsigned long pfn, unsigned long nr_pages)
 }
 #endif /* CONFIG_MEMORY_HOTPLUG_SPARSE */

-static pg_data_t *hotadd_new_pgdat(int nid, u64 start)
+/* we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG */
+static pg_data_t __ref *hotadd_new_pgdat(int nid, u64 start)
 {
         struct pglist_data *pgdat;
         unsigned long zones_size[MAX_NR_ZONES] = {0};
@@ -484,14 +486,18 @@ int __ref add_memory(int nid, u64 start, u64 size)
         struct resource *res;
         int ret;

+        lock_system_sleep();
+
         res = register_memory_resource(start, size);
+        ret = -EEXIST;
         if (!res)
-                return -EEXIST;
+                goto out;

         if (!node_online(nid)) {
                 pgdat = hotadd_new_pgdat(nid, start);
+                ret = -ENOMEM;
                 if (!pgdat)
-                        return -ENOMEM;
+                        goto out;
                 new_pgdat = 1;
         }

@@ -514,7 +520,8 @@ int __ref add_memory(int nid, u64 start, u64 size)
                 BUG_ON(ret);
         }

-        return ret;
+        goto out;
+
 error:
         /* rollback pgdat allocation and others */
         if (new_pgdat)
@@ -522,6 +529,8 @@ error:
         if (res)
                 release_memory_resource(res);

+out:
+        unlock_system_sleep();
         return ret;
 }
 EXPORT_SYMBOL_GPL(add_memory);
@@ -758,6 +767,8 @@ int offline_pages(unsigned long start_pfn,
         if (!test_pages_in_a_zone(start_pfn, end_pfn))
                 return -EINVAL;

+        lock_system_sleep();
+
         zone = page_zone(pfn_to_page(start_pfn));
         node = zone_to_nid(zone);
         nr_pages = end_pfn - start_pfn;
@@ -765,7 +776,7 @@ int offline_pages(unsigned long start_pfn,
         /* set above range as isolated */
         ret = start_isolate_page_range(start_pfn, end_pfn);
         if (ret)
-                return ret;
+                goto out;

         arg.start_pfn = start_pfn;
         arg.nr_pages = nr_pages;
@@ -843,6 +854,7 @@ repeat:
         writeback_set_ratelimit();

         memory_notify(MEM_OFFLINE, &arg);
+        unlock_system_sleep();
         return 0;

 failed_removal:
@@ -852,6 +864,8 @@ failed_removal:
         /* pushback to free area */
         undo_isolate_page_range(start_pfn, end_pfn);

+out:
+        unlock_system_sleep();
         return ret;
 }
diff --git a/mm/migrate.c b/mm/migrate.c
index 1a4bf481378..7dbcb22316d 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -602,7 +602,7 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
         struct page *newpage = get_new_page(page, private, &result);
         int rcu_locked = 0;
         int charge = 0;
-        struct mem_cgroup *mem;
+        struct mem_cgroup *mem = NULL;

         if (!newpage)
                 return -ENOMEM;
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index cdcedf66161..2bc2ac63f41 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1769,7 +1769,7 @@ gfp_to_alloc_flags(gfp_t gfp_mask)
                  * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
                  */
                 alloc_flags &= ~ALLOC_CPUSET;
-        } else if (unlikely(rt_task(p)))
+        } else if (unlikely(rt_task(p)) && !in_interrupt())
                 alloc_flags |= ALLOC_HARDER;

         if (likely(!(gfp_mask & __GFP_NOMEMALLOC))) {
@@ -1817,9 +1817,9 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
         if (NUMA_BUILD && (gfp_mask & GFP_THISNODE) == GFP_THISNODE)
                 goto nopage;

+restart:
         wake_all_kswapd(order, zonelist, high_zoneidx);

-restart:
         /*
          * OK, we're below the kswapd watermark and have kicked background
          * reclaim. Now things get more complex, so set up alloc_flags according
diff --git a/mm/percpu.c b/mm/percpu.c
index d90797160c2..5adfc268b40 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -355,62 +355,86 @@ static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
 }

 /**
- * pcpu_extend_area_map - extend area map for allocation
- * @chunk: target chunk
+ * pcpu_need_to_extend - determine whether chunk area map needs to be extended
+ * @chunk: chunk of interest
  *
- * Extend area map of @chunk so that it can accomodate an allocation.
- * A single allocation can split an area into three areas, so this
- * function makes sure that @chunk->map has at least two extra slots.
+ * Determine whether area map of @chunk needs to be extended to
+ * accomodate a new allocation.
  *
  * CONTEXT:
- * pcpu_alloc_mutex, pcpu_lock. pcpu_lock is released and reacquired
- * if area map is extended.
+ * pcpu_lock.
  *
  * RETURNS:
- * 0 if noop, 1 if successfully extended, -errno on failure.
+ * New target map allocation length if extension is necessary, 0
+ * otherwise.
  */
-static int pcpu_extend_area_map(struct pcpu_chunk *chunk, unsigned long *flags)
+static int pcpu_need_to_extend(struct pcpu_chunk *chunk)
 {
         int new_alloc;
-        int *new;
-        size_t size;

-        /* has enough? */
         if (chunk->map_alloc >= chunk->map_used + 2)
                 return 0;

-        spin_unlock_irqrestore(&pcpu_lock, *flags);
-
         new_alloc = PCPU_DFL_MAP_ALLOC;
         while (new_alloc < chunk->map_used + 2)
                 new_alloc *= 2;

-        new = pcpu_mem_alloc(new_alloc * sizeof(new[0]));
-        if (!new) {
-                spin_lock_irqsave(&pcpu_lock, *flags);
+        return new_alloc;
+}
+
+/**
+ * pcpu_extend_area_map - extend area map of a chunk
+ * @chunk: chunk of interest
+ * @new_alloc: new target allocation length of the area map
+ *
+ * Extend area map of @chunk to have @new_alloc entries.
+ *
+ * CONTEXT:
+ * Does GFP_KERNEL allocation. Grabs and releases pcpu_lock.
+ *
+ * RETURNS:
+ * 0 on success, -errno on failure.
+ */
+static int pcpu_extend_area_map(struct pcpu_chunk *chunk, int new_alloc)
+{
+        int *old = NULL, *new = NULL;
+        size_t old_size = 0, new_size = new_alloc * sizeof(new[0]);
+        unsigned long flags;
+
+        new = pcpu_mem_alloc(new_size);
+        if (!new)
                 return -ENOMEM;
-        }

-        /*
-         * Acquire pcpu_lock and switch to new area map. Only free
-         * could have happened inbetween, so map_used couldn't have
-         * grown.
-         */
-        spin_lock_irqsave(&pcpu_lock, *flags);
-        BUG_ON(new_alloc < chunk->map_used + 2);
+        /* acquire pcpu_lock and switch to new area map */
+        spin_lock_irqsave(&pcpu_lock, flags);
+
+        if (new_alloc <= chunk->map_alloc)
+                goto out_unlock;

-        size = chunk->map_alloc * sizeof(chunk->map[0]);
-        memcpy(new, chunk->map, size);
+        old_size = chunk->map_alloc * sizeof(chunk->map[0]);
+        memcpy(new, chunk->map, old_size);

         /*
          * map_alloc < PCPU_DFL_MAP_ALLOC indicates that the chunk is
          * one of the first chunks and still using static map.
          */
         if (chunk->map_alloc >= PCPU_DFL_MAP_ALLOC)
-                pcpu_mem_free(chunk->map, size);
+                old = chunk->map;

         chunk->map_alloc = new_alloc;
         chunk->map = new;
+        new = NULL;
+
+out_unlock:
+        spin_unlock_irqrestore(&pcpu_lock, flags);
+
+        /*
+         * pcpu_mem_free() might end up calling vfree() which uses
+         * IRQ-unsafe lock and thus can't be called under pcpu_lock.
+         */
+        pcpu_mem_free(old, old_size);
+        pcpu_mem_free(new, new_size);
+
         return 0;
 }

@@ -1049,7 +1073,7 @@ static void *pcpu_alloc(size_t size, size_t align, bool reserved)
         static int warn_limit = 10;
         struct pcpu_chunk *chunk;
         const char *err;
-        int slot, off;
+        int slot, off, new_alloc;
         unsigned long flags;

         if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE)) {
@@ -1064,14 +1088,25 @@ static void *pcpu_alloc(size_t size, size_t align, bool reserved)
         /* serve reserved allocations from the reserved chunk if available */
         if (reserved && pcpu_reserved_chunk) {
                 chunk = pcpu_reserved_chunk;
-                if (size > chunk->contig_hint ||
-                    pcpu_extend_area_map(chunk, &flags) < 0) {
-                        err = "failed to extend area map of reserved chunk";
+
+                if (size > chunk->contig_hint) {
+                        err = "alloc from reserved chunk failed";
                         goto fail_unlock;
                 }
+
+                while ((new_alloc = pcpu_need_to_extend(chunk))) {
+                        spin_unlock_irqrestore(&pcpu_lock, flags);
+                        if (pcpu_extend_area_map(chunk, new_alloc) < 0) {
+                                err = "failed to extend area map of reserved chunk";
+                                goto fail_unlock_mutex;
+                        }
+                        spin_lock_irqsave(&pcpu_lock, flags);
+                }
+
                 off = pcpu_alloc_area(chunk, size, align);
                 if (off >= 0)
                         goto area_found;
+
                 err = "alloc from reserved chunk failed";
                 goto fail_unlock;
         }
@@ -1083,14 +1118,20 @@ restart:
                         if (size > chunk->contig_hint)
                                 continue;

-                        switch (pcpu_extend_area_map(chunk, &flags)) {
-                        case 0:
-                                break;
-                        case 1:
-                                goto restart;        /* pcpu_lock dropped, restart */
-                        default:
-                                err = "failed to extend area map";
-                                goto fail_unlock;
+                        new_alloc = pcpu_need_to_extend(chunk);
+                        if (new_alloc) {
+                                spin_unlock_irqrestore(&pcpu_lock, flags);
+                                if (pcpu_extend_area_map(chunk,
+                                                         new_alloc) < 0) {
+                                        err = "failed to extend area map";
+                                        goto fail_unlock_mutex;
+                                }
+                                spin_lock_irqsave(&pcpu_lock, flags);
+                                /*
+                                 * pcpu_lock has been dropped, need to
+                                 * restart cpu_slot list walking.
+                                 */
+                                goto restart;
                         }

                         off = pcpu_alloc_area(chunk, size, align);
diff --git a/mm/swapfile.c b/mm/swapfile.c
index a1bc6b9af9a..9c590eef791 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -1151,8 +1151,7 @@ static int try_to_unuse(unsigned int type)
                 } else
                         retval = unuse_mm(mm, entry, page);

-                if (set_start_mm &&
-                    swap_count(*swap_map) < swcount) {
+                if (set_start_mm && *swap_map < swcount) {
                         mmput(new_start_mm);
                         atomic_inc(&mm->mm_users);
                         new_start_mm = mm;
```
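The largest change above is the mm/percpu.c restructuring: pcpu_need_to_extend() now only decides, under pcpu_lock, whether the area map must grow, while pcpu_extend_area_map() performs the GFP_KERNEL allocation without holding the lock, publishes the new map under the lock, and frees the stale buffer only after unlocking, because pcpu_mem_free() may end up in vfree(), whose lock is not IRQ-safe. The sketch below is a minimal user-space analogue of that allocate-outside / swap-under-lock / free-after-unlock pattern; it is not kernel code, and every name in it (struct area_map, extend_area_map, and so on) is illustrative.

```c
/*
 * Minimal user-space analogue (illustrative names, not kernel code) of the
 * pattern adopted by the mm/percpu.c change: allocate the larger map without
 * holding the lock, publish it under the lock, and free the stale buffer
 * only after the lock has been dropped.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct area_map {
	pthread_mutex_t lock;   /* stands in for pcpu_lock */
	int *map;               /* area map entries */
	size_t map_alloc;       /* slots currently allocated */
	size_t map_used;        /* slots currently in use */
};

/* Grow m->map to at least new_alloc slots. Returns 0 on success, -1 on OOM. */
static int extend_area_map(struct area_map *m, size_t new_alloc)
{
	int *new_map, *old_map = NULL;
	size_t old_alloc;

	/* The allocation may block, so it happens outside the lock. */
	new_map = calloc(new_alloc, sizeof(*new_map));
	if (!new_map)
		return -1;

	pthread_mutex_lock(&m->lock);
	if (new_alloc <= m->map_alloc) {
		/* Someone else extended the map while we were allocating. */
		pthread_mutex_unlock(&m->lock);
		free(new_map);
		return 0;
	}
	old_alloc = m->map_alloc;
	memcpy(new_map, m->map, old_alloc * sizeof(*new_map));
	old_map = m->map;
	m->map = new_map;
	m->map_alloc = new_alloc;
	pthread_mutex_unlock(&m->lock);

	/* The stale buffer is released only after unlocking. */
	free(old_map);
	return 0;
}

int main(void)
{
	struct area_map m = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.map = calloc(4, sizeof(int)),
		.map_alloc = 4,
		.map_used = 0,
	};

	if (!m.map || extend_area_map(&m, 16) < 0)
		return 1;
	printf("map_alloc is now %zu\n", m.map_alloc);
	free(m.map);
	return 0;
}
```

The design point carried over from the patch is that only the pointer swap needs the lock; the potentially blocking allocation and the free of the old buffer are both pushed outside the critical section, which is exactly what the in-diff comment about vfree() and pcpu_lock is protecting.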