From 93278814d3590eba0ee360b8d69a35c7f2203ea8 Mon Sep 17 00:00:00 2001
From: Sasha Levin
Date: Thu, 10 May 2012 13:01:44 -0700
Subject: mm: fix division by 0 in percpu_pagelist_fraction()

percpu_pagelist_fraction_sysctl_handler() has only considered -EINVAL
as a possible error from proc_dointvec_minmax().  If any other error is
returned, it would proceed to divide by zero since
percpu_pagelist_fraction wasn't getting initialized at any point.  For
example, writing 0 bytes into the proc file would trigger the issue.

Signed-off-by: Sasha Levin
Reviewed-by: Minchan Kim
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/page_alloc.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'mm')

diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index a712fb9e04c..b21b3db15a7 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -105,7 +105,7 @@ unsigned long totalreserve_pages __read_mostly;
  */
 unsigned long dirty_balance_reserve __read_mostly;
 
-int percpu_pagelist_fraction;
+int percpu_pagelist_fraction = 8;
 gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;
 
 #ifdef CONFIG_PM_SLEEP
@@ -5203,7 +5203,7 @@ int percpu_pagelist_fraction_sysctl_handler(ctl_table *table, int write,
 	int ret;
 
 	ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
-	if (!write || (ret == -EINVAL))
+	if (!write || (ret < 0))
 		return ret;
 	for_each_populated_zone(zone) {
 		for_each_possible_cpu(cpu) {
--
cgit v1.2.3-18-g5258
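
A minimal, hypothetical userspace sketch of the trigger described in the
commit message above: a zero-length write to the percpu_pagelist_fraction
sysctl. It is not part of the patch; the proc path and the failure mode are
taken from the commit message, and on an unpatched kernel of that era the
program is expected to provoke the divide-by-zero, so it is shown purely as
an illustration (requires root).

/*
 * Illustrative reproducer sketch, not part of the patch above.
 * A zero-length write makes proc_dointvec_minmax() return without
 * storing a value, so on an unpatched kernel the handler then
 * divides by the still-zero percpu_pagelist_fraction.
 * Do not run this on a machine you care about.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/proc/sys/vm/percpu_pagelist_fraction", O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Write 0 bytes into the proc file, as the commit message describes. */
	if (write(fd, "", 0) < 0)
		perror("write");

	close(fd);
	return 0;
}
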
From 4998a6c0edce7fae9c0a5463f6ec3fa585258ee7 Mon Sep 17 00:00:00 2001
From: Chris Metcalf
Date: Thu, 10 May 2012 13:01:44 -0700
Subject: hugetlb: prevent BUG_ON in hugetlb_fault() -> hugetlb_cow()

Commit 66aebce747eaf ("hugetlb: fix race condition in hugetlb_fault()")
added code to avoid a race condition by elevating the page refcount in
hugetlb_fault() while calling hugetlb_cow().  However, one code path in
hugetlb_cow() includes an assertion that the page count is 1, whereas it
may now also have the value 2 in this path.  The consensus is that this
BUG_ON has served its purpose, so rather than extending it to cover both
cases, we just remove it.

Signed-off-by: Chris Metcalf
Acked-by: Mel Gorman
Acked-by: Hillf Danton
Acked-by: Hugh Dickins
Cc: Michal Hocko
Cc: KAMEZAWA Hiroyuki
Cc: [3.0.29+, 3.2.16+, 3.3.3+]
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/hugetlb.c | 1 -
 1 file changed, 1 deletion(-)

(limited to 'mm')

diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 5a16423a512..ae8f708e3d7 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2498,7 +2498,6 @@ retry_avoidcopy:
 		if (outside_reserve) {
 			BUG_ON(huge_pte_none(pte));
 			if (unmap_ref_private(mm, vma, old_page, address)) {
-				BUG_ON(page_count(old_page) != 1);
 				BUG_ON(huge_pte_none(pte));
 				spin_lock(&mm->page_table_lock);
 				ptep = huge_pte_offset(mm, address & huge_page_mask(h));
--
cgit v1.2.3-18-g5258

From 8c7577637ca31385e92769a77e2ab5b428e8b99c Mon Sep 17 00:00:00 2001
From: Sha Zhengju
Date: Thu, 10 May 2012 13:01:45 -0700
Subject: memcg: free spare array to avoid memory leak

When the last event is unregistered, there is no need to keep the spare
array anymore.  So free it to avoid a memory leak.

Signed-off-by: Sha Zhengju
Acked-by: KAMEZAWA Hiroyuki
Reviewed-by: Kirill A. Shutemov
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/memcontrol.c | 6 ++++++
 1 file changed, 6 insertions(+)

(limited to 'mm')

diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 31ab9c3f017..b659260c56a 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -4507,6 +4507,12 @@ static void mem_cgroup_usage_unregister_event(struct cgroup *cgrp,
 swap_buffers:
 	/* Swap primary and spare array */
 	thresholds->spare = thresholds->primary;
+	/* If all events are unregistered, free the spare array */
+	if (!new) {
+		kfree(thresholds->spare);
+		thresholds->spare = NULL;
+	}
+
 	rcu_assign_pointer(thresholds->primary, new);
 
 	/* To be sure that nobody uses thresholds */
--
cgit v1.2.3-18-g5258

From 6bc2e853c6b46a6041980d58200ad9b0a73a60ff Mon Sep 17 00:00:00 2001
From: Russ Anderson
Date: Thu, 10 May 2012 13:01:46 -0700
Subject: mm: nobootmem: fix sign extend problem in __free_pages_memory()

Systems with 8 TBytes of memory or greater can hit a problem where only
the first 8 TB of memory shows up.  This is due to "int i" being smaller
than "unsigned long start_aligned", causing the high bits to be dropped.

The fix is to change `i' to unsigned long to match start_aligned and
end_aligned.

Thanks to Jack Steiner for assistance tracking this down.

Signed-off-by: Russ Anderson
Cc: Jack Steiner
Cc: Johannes Weiner
Cc: Tejun Heo
Cc: David S. Miller
Cc: Yinghai Lu
Cc: Gavin Shan
Cc:
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/nobootmem.c | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

(limited to 'mm')

diff --git a/mm/nobootmem.c b/mm/nobootmem.c
index e53bb8a256b..1983fb1c702 100644
--- a/mm/nobootmem.c
+++ b/mm/nobootmem.c
@@ -82,8 +82,7 @@ void __init free_bootmem_late(unsigned long addr, unsigned long size)
 
 static void __init __free_pages_memory(unsigned long start, unsigned long end)
 {
-	int i;
-	unsigned long start_aligned, end_aligned;
+	unsigned long i, start_aligned, end_aligned;
 	int order = ilog2(BITS_PER_LONG);
 
 	start_aligned = (start + (BITS_PER_LONG - 1)) & ~(BITS_PER_LONG - 1);
--
cgit v1.2.3-18-g5258
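
To make the width problem concrete, the following is a small hypothetical
userspace sketch, not part of the patch above. It shows what happens when a
page-frame number past the 8 TB mark is stored in a plain int, as the old
loop counter in __free_pages_memory() effectively did. The constants assume
4 KiB pages and a two's-complement LP64 target.

/*
 * Illustrative sketch, not part of the patch above: storing a PFN
 * beyond 8 TB in an 'int' loses the high bits, which is consistent
 * with only the first 8 TB of memory showing up before the fix.
 * Assumes 4 KiB pages and a two's-complement LP64 ABI.
 */
#include <stdio.h>

int main(void)
{
	/* First PFN past 8 TB with 4 KiB pages: 2^43 / 2^12 = 2^31. */
	unsigned long start_aligned = 1UL << 31;
	int i = start_aligned;	/* out of range for int; typically wraps to INT_MIN */

	printf("start_aligned    = %#lx\n", start_aligned);
	printf("int i            = %d\n", i);
	/* Widened back to unsigned long for comparison with end_aligned,
	 * 'i' no longer matches start_aligned: */
	printf("(unsigned long)i = %#lx\n", (unsigned long)i);

	return 0;
}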