Diffstat (limited to 'mm')
-rw-r--r--   mm/filemap.c        |  32
-rw-r--r--   mm/madvise.c        |   3
-rw-r--r--   mm/memory_hotplug.c |   6
-rw-r--r--   mm/mempolicy.c      |   1
-rw-r--r--   mm/migrate.c        |  11
-rw-r--r--   mm/oom_kill.c       |  71
-rw-r--r--   mm/page_alloc.c     |  12
-rw-r--r--   mm/shmem.c          |   3
-rw-r--r--   mm/slab.c           |   5
-rw-r--r--   mm/slob.c           |  10
-rw-r--r--   mm/sparse.c         |   9
-rw-r--r--   mm/vmscan.c         |   2
12 files changed, 115 insertions, 50 deletions
diff --git a/mm/filemap.c b/mm/filemap.c
index 3ef20739e72..fd57442186c 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -697,6 +697,38 @@ unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
 	return ret;
 }

+/**
+ * find_get_pages_contig - gang contiguous pagecache lookup
+ * @mapping:	The address_space to search
+ * @index:	The starting page index
+ * @nr_pages:	The maximum number of pages
+ * @pages:	Where the resulting pages are placed
+ *
+ * find_get_pages_contig() works exactly like find_get_pages(), except
+ * that the returned number of pages are guaranteed to be contiguous.
+ *
+ * find_get_pages_contig() returns the number of pages which were found.
+ */
+unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t index,
+			       unsigned int nr_pages, struct page **pages)
+{
+	unsigned int i;
+	unsigned int ret;
+
+	read_lock_irq(&mapping->tree_lock);
+	ret = radix_tree_gang_lookup(&mapping->page_tree,
+				(void **)pages, index, nr_pages);
+	for (i = 0; i < ret; i++) {
+		if (pages[i]->mapping == NULL || pages[i]->index != index)
+			break;
+
+		page_cache_get(pages[i]);
+		index++;
+	}
+	read_unlock_irq(&mapping->tree_lock);
+	return i;
+}
+
 /*
  * Like find_get_pages, except we only return pages which are tagged with
  * `tag'.  We update *index to index the next page for the traversal.
diff --git a/mm/madvise.c b/mm/madvise.c
index af3d573b014..4e196155a0c 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -168,6 +168,9 @@ static long madvise_remove(struct vm_area_struct *vma,
 		return -EINVAL;
 	}

+	if ((vma->vm_flags & (VM_SHARED|VM_WRITE)) != (VM_SHARED|VM_WRITE))
+		return -EACCES;
+
 	mapping = vma->vm_file->f_mapping;

 	offset = (loff_t)(start - vma->vm_start)
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 1fe76d963ac..1ae2b2cc3a5 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -69,12 +69,16 @@ int __add_pages(struct zone *zone, unsigned long phys_start_pfn,
 	for (i = 0; i < nr_pages; i += PAGES_PER_SECTION) {
 		err = __add_section(zone, phys_start_pfn + i);

-		if (err)
+		/* We want to keep adding the rest of the
+		 * sections if the first ones already exist
+		 */
+		if (err && (err != -EEXIST))
 			break;
 	}

 	return err;
 }
+EXPORT_SYMBOL_GPL(__add_pages);

 static void grow_zone_span(struct zone *zone,
 			   unsigned long start_pfn, unsigned long end_pfn)
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index dec8249e972..8778f58880c 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1761,7 +1761,6 @@ static void gather_stats(struct page *page, void *private, int pte_dirty)
 		md->mapcount_max = count;

 	md->node[page_to_nid(page)]++;
-	cond_resched();
 }

 #ifdef CONFIG_HUGETLB_PAGE
diff --git a/mm/migrate.c b/mm/migrate.c
index d444229f259..1c25040693d 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -439,6 +439,17 @@ redo:
 			goto unlock_both;
 		}

+		/* Make sure the dirty bit is up to date */
+		if (try_to_unmap(page, 1) == SWAP_FAIL) {
+			rc = -EPERM;
+			goto unlock_both;
+		}
+
+		if (page_mapcount(page)) {
+			rc = -EAGAIN;
+			goto unlock_both;
+		}
+
 		/*
 		 * Default handling if a filesystem does not provide
 		 * a migration function. We can only migrate clean
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 78747afad6b..042e6436c3e 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -46,15 +46,25 @@ unsigned long badness(struct task_struct *p, unsigned long uptime)
 {
 	unsigned long points, cpu_time, run_time, s;
-	struct list_head *tsk;
+	struct mm_struct *mm;
+	struct task_struct *child;

-	if (!p->mm)
+	task_lock(p);
+	mm = p->mm;
+	if (!mm) {
+		task_unlock(p);
 		return 0;
+	}

 	/*
 	 * The memory size of the process is the basis for the badness.
 	 */
-	points = p->mm->total_vm;
+	points = mm->total_vm;
+
+	/*
+	 * After this unlock we can no longer dereference local variable `mm'
+	 */
+	task_unlock(p);

 	/*
 	 * Processes which fork a lot of child processes are likely
@@ -64,11 +74,11 @@ unsigned long badness(struct task_struct *p, unsigned long uptime)
 	 * child is eating the vast majority of memory, adding only half
 	 * to the parents will make the child our kill candidate of choice.
 	 */
-	list_for_each(tsk, &p->children) {
-		struct task_struct *chld;
-		chld = list_entry(tsk, struct task_struct, sibling);
-		if (chld->mm != p->mm && chld->mm)
-			points += chld->mm->total_vm/2 + 1;
+	list_for_each_entry(child, &p->children, sibling) {
+		task_lock(child);
+		if (child->mm != mm && child->mm)
+			points += child->mm->total_vm/2 + 1;
+		task_unlock(child);
 	}

 	/*
@@ -244,17 +254,24 @@ static void __oom_kill_task(task_t *p, const char *message)
 	force_sig(SIGKILL, p);
 }

-static struct mm_struct *oom_kill_task(task_t *p, const char *message)
+static int oom_kill_task(task_t *p, const char *message)
 {
-	struct mm_struct *mm = get_task_mm(p);
+	struct mm_struct *mm;
 	task_t * g, * q;

-	if (!mm)
-		return NULL;
-	if (mm == &init_mm) {
-		mmput(mm);
-		return NULL;
-	}
+	mm = p->mm;
+
+	/* WARNING: mm may not be dereferenced since we did not obtain its
+	 * value from get_task_mm(p).  This is OK since all we need to do is
+	 * compare mm to q->mm below.
+	 *
+	 * Furthermore, even if mm contains a non-NULL value, p->mm may
+	 * change to NULL at any time since we do not hold task_lock(p).
+	 * However, this is of no concern to us.
+	 */
+
+	if (mm == NULL || mm == &init_mm)
+		return 1;
 	__oom_kill_task(p, message);

 	/*
@@ -266,13 +283,12 @@ static struct mm_struct *oom_kill_task(task_t *p, const char *message)
 			__oom_kill_task(q, message);
 	while_each_thread(g, q);

-	return mm;
+	return 0;
 }

-static struct mm_struct *oom_kill_process(struct task_struct *p,
-				unsigned long points, const char *message)
+static int oom_kill_process(struct task_struct *p, unsigned long points,
+			    const char *message)
 {
-	struct mm_struct *mm;
 	struct task_struct *c;
 	struct list_head *tsk;

@@ -283,9 +299,8 @@ static struct mm_struct *oom_kill_process(struct task_struct *p,
 		c = list_entry(tsk, struct task_struct, sibling);
 		if (c->mm == p->mm)
 			continue;
-		mm = oom_kill_task(c, message);
-		if (mm)
-			return mm;
+		if (!oom_kill_task(c, message))
+			return 0;
 	}
 	return oom_kill_task(p, message);
 }
@@ -300,7 +315,6 @@ static struct mm_struct *oom_kill_process(struct task_struct *p,
  */
 void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask, int order)
 {
-	struct mm_struct *mm = NULL;
 	task_t *p;
 	unsigned long points = 0;

@@ -320,12 +334,12 @@ void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask, int order)
 	 */
 	switch (constrained_alloc(zonelist, gfp_mask)) {
 	case CONSTRAINT_MEMORY_POLICY:
-		mm = oom_kill_process(current, points,
+		oom_kill_process(current, points,
 				"No available memory (MPOL_BIND)");
 		break;

 	case CONSTRAINT_CPUSET:
-		mm = oom_kill_process(current, points,
+		oom_kill_process(current, points,
 				"No available memory in cpuset");
 		break;

@@ -347,8 +361,7 @@ retry:
 			panic("Out of memory and no killable processes...\n");
 		}

-		mm = oom_kill_process(p, points, "Out of memory");
-		if (!mm)
+		if (oom_kill_process(p, points, "Out of memory"))
 			goto retry;

 		break;
@@ -357,8 +370,6 @@ retry:
 out:
 	read_unlock(&tasklist_lock);
 	cpuset_unlock();
-	if (mm)
-		mmput(mm);

 	/*
 	 * Give "p" a good chance of killing itself before we
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 97d6827c7d6..ea77c999047 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -232,11 +232,13 @@ static inline void prep_zero_page(struct page *page, int order, gfp_t gfp_flags)
  * zone->lock is already acquired when we use these.
  * So, we don't need atomic page->flags operations here.
  */
-static inline unsigned long page_order(struct page *page) {
+static inline unsigned long page_order(struct page *page)
+{
 	return page_private(page);
 }

-static inline void set_page_order(struct page *page, int order) {
+static inline void set_page_order(struct page *page, int order)
+{
 	set_page_private(page, order);
 	__SetPageBuddy(page);
 }
@@ -299,9 +301,9 @@ static inline int page_is_buddy(struct page *page, int order)
 	if (PageBuddy(page) && page_order(page) == order) {
 		BUG_ON(page_count(page) != 0);
-               return 1;
+		return 1;
 	}
-       return 0;
+	return 0;
 }
@@ -1960,7 +1962,7 @@ static inline void free_zone_pagesets(int cpu)
 	}
 }

-static int __cpuinit pageset_cpuup_callback(struct notifier_block *nfb,
+static int pageset_cpuup_callback(struct notifier_block *nfb,
 		unsigned long action,
 		void *hcpu)
 {
diff --git a/mm/shmem.c b/mm/shmem.c
index 37eaf42ed2c..4c5e68e4e9a 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -46,6 +46,8 @@
 #include <linux/mempolicy.h>
 #include <linux/namei.h>
 #include <linux/ctype.h>
+#include <linux/migrate.h>
+
 #include <asm/uaccess.h>
 #include <asm/div64.h>
 #include <asm/pgtable.h>
@@ -2173,6 +2175,7 @@ static struct address_space_operations shmem_aops = {
 	.prepare_write	= shmem_prepare_write,
 	.commit_write	= simple_commit_write,
 #endif
+	.migratepage	= migrate_page,
 };

 static struct file_operations shmem_file_operations = {
diff --git a/mm/slab.c b/mm/slab.c
index e6ef9bd5233..c32af7e7581 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -979,7 +979,8 @@ static void __drain_alien_cache(struct kmem_cache *cachep,
 		 * That way we could avoid the overhead of putting the objects
 		 * into the free lists and getting them back later.
 		 */
-		transfer_objects(rl3->shared, ac, ac->limit);
+		if (rl3->shared)
+			transfer_objects(rl3->shared, ac, ac->limit);

 		free_block(cachep, ac->entry, ac->avail, node);
 		ac->avail = 0;
@@ -1036,7 +1037,7 @@ static inline void free_alien_cache(struct array_cache **ac_ptr)

 #endif

-static int __devinit cpuup_callback(struct notifier_block *nfb,
+static int cpuup_callback(struct notifier_block *nfb,
 				    unsigned long action, void *hcpu)
 {
 	long cpu = (long)hcpu;
diff --git a/mm/slob.c b/mm/slob.c
index 9bcc7e2cabf..a68255ba455 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -354,9 +354,7 @@ void *__alloc_percpu(size_t size)
 	if (!pdata)
 		return NULL;

-	for (i = 0; i < NR_CPUS; i++) {
-		if (!cpu_possible(i))
-			continue;
+	for_each_possible_cpu(i) {
 		pdata->ptrs[i] = kmalloc(size, GFP_KERNEL);
 		if (!pdata->ptrs[i])
 			goto unwind_oom;
@@ -383,11 +381,9 @@ free_percpu(const void *objp)
 	int i;
 	struct percpu_data *p = (struct percpu_data *) (~(unsigned long) objp);

-	for (i = 0; i < NR_CPUS; i++) {
-		if (!cpu_possible(i))
-			continue;
+	for_each_possible_cpu(i)
 		kfree(p->ptrs[i]);
-	}
+
 	kfree(p);
 }
 EXPORT_SYMBOL(free_percpu);
diff --git a/mm/sparse.c b/mm/sparse.c
index 0a51f36ba3a..d7c32de99ee 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -32,7 +32,10 @@ static struct mem_section *sparse_index_alloc(int nid)
 	unsigned long array_size = SECTIONS_PER_ROOT *
 				   sizeof(struct mem_section);

-	section = alloc_bootmem_node(NODE_DATA(nid), array_size);
+	if (system_state == SYSTEM_RUNNING)
+		section = kmalloc_node(array_size, GFP_KERNEL, nid);
+	else
+		section = alloc_bootmem_node(NODE_DATA(nid), array_size);

 	if (section)
 		memset(section, 0, array_size);
@@ -281,9 +284,9 @@ int sparse_add_one_section(struct zone *zone, unsigned long start_pfn,

 	ret = sparse_init_one_section(ms, section_nr, memmap);

-	if (ret <= 0)
-		__kfree_section_memmap(memmap, nr_pages);
 out:
 	pgdat_resize_unlock(pgdat, &flags);
+	if (ret <= 0)
+		__kfree_section_memmap(memmap, nr_pages);
 	return ret;
 }
diff --git a/mm/vmscan.c b/mm/vmscan.c
index acdf001d694..4649a63a8cb 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1328,7 +1328,7 @@ repeat:
    not required for correctness.  So if the last cpu in a node goes
    away, we get changed to run anywhere: as the first one comes back,
    restore their cpu bindings. */
-static int __devinit cpu_callback(struct notifier_block *nfb,
+static int cpu_callback(struct notifier_block *nfb,
 				  unsigned long action, void *hcpu)
 {
 	pg_data_t *pgdat;
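
The new find_get_pages_contig() in the filemap.c hunk stops at the first hole in the mapping and takes a reference on each page in the contiguous run. A minimal sketch of a caller, assuming a hypothetical example_scan_contig() and a fixed 16-page batch; only the lookup/release pattern comes from the helper itself:

#include <linux/fs.h>
#include <linux/pagemap.h>

/* Hypothetical caller: look up at most 16 pages starting at 'index'.
 * find_get_pages_contig() only returns pages with consecutive indices,
 * so pages[i] is always at index + i.  Each returned page carries a
 * reference that the caller must drop.
 */
static void example_scan_contig(struct address_space *mapping, pgoff_t index)
{
	struct page *pages[16];
	unsigned int nr, i;

	nr = find_get_pages_contig(mapping, index, 16, pages);
	for (i = 0; i < nr; i++)
		page_cache_release(pages[i]);	/* drop the lookup reference */
}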
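The badness() hunks replace direct p->mm dereferences with a lock-copy-unlock pattern so the mm cannot disappear mid-read. A sketch of that pattern in isolation, with a hypothetical helper name; only task_lock()/task_unlock() and mm->total_vm are taken from the diff:

#include <linux/sched.h>

/* Hypothetical helper: read total_vm without racing against exit_mm()
 * clearing p->mm.  The mm pointer may only be dereferenced while
 * task_lock(p) is held.
 */
static unsigned long example_task_total_vm(struct task_struct *p)
{
	struct mm_struct *mm;
	unsigned long total_vm = 0;

	task_lock(p);
	mm = p->mm;
	if (mm)
		total_vm = mm->total_vm;
	task_unlock(p);		/* mm must not be dereferenced past this point */

	return total_vm;
}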
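The slob.c hunks swap an open-coded 0..NR_CPUS-1 loop guarded by cpu_possible() for for_each_possible_cpu(), which visits only CPUs that can ever exist. A sketch of the freeing side, assuming the ptrs[] layout visible in the diff:

#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/slab.h>

/* Illustrative only: free one kmalloc'ed object per possible CPU and
 * then the per-CPU descriptor itself, mirroring free_percpu() above.
 */
static void example_free_percpu(struct percpu_data *p)
{
	int i;

	for_each_possible_cpu(i)	/* replaces the NR_CPUS loop + cpu_possible(i) check */
		kfree(p->ptrs[i]);
	kfree(p);
}

On machines where the possible-CPU map is much smaller than NR_CPUS, the iterator also avoids walking slots that can never be populated.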
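The sparse.c change picks an allocator based on how far boot has progressed: bootmem before the slab allocator is up, kmalloc_node() once the system is running (the memory-hotplug path). A sketch of that decision on its own, with a hypothetical wrapper name:

#include <linux/bootmem.h>
#include <linux/kernel.h>
#include <linux/mmzone.h>
#include <linux/slab.h>

/* Illustrative wrapper: node-local allocation that works both during
 * early boot and after the system is running (memory hotplug).
 */
static void *example_alloc_node_local(int nid, unsigned long size)
{
	if (system_state == SYSTEM_RUNNING)		/* slab is available */
		return kmalloc_node(size, GFP_KERNEL, nid);
	return alloc_bootmem_node(NODE_DATA(nid), size);	/* early boot only */
}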