Diffstat (limited to 'mm')
-rw-r--r--  mm/Kconfig            |  2
-rw-r--r--  mm/memcontrol.c       | 31
-rw-r--r--  mm/migrate.c          |  2
-rw-r--r--  mm/page_alloc.c       | 18
-rw-r--r--  mm/pgtable-generic.c  |  1
-rw-r--r--  mm/vmscan.c           |  3
6 files changed, 41 insertions(+), 16 deletions(-)
diff --git a/mm/Kconfig b/mm/Kconfig
index 3ad483bdf50..e9c0c61f2dd 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -179,7 +179,7 @@ config SPLIT_PTLOCK_CPUS
config COMPACTION
bool "Allow for memory compaction"
select MIGRATION
- depends on EXPERIMENTAL && HUGETLB_PAGE && MMU
+ depends on MMU
help
Allows the compaction of memory for the allocation of huge pages.
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index db76ef72629..3878cfe399d 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1832,6 +1832,7 @@ static int __mem_cgroup_do_charge(struct mem_cgroup *mem, gfp_t gfp_mask,
if (likely(!ret))
return CHARGE_OK;
+ res_counter_uncharge(&mem->res, csize);
mem_over_limit = mem_cgroup_from_res_counter(fail_res, memsw);
flags |= MEM_CGROUP_RECLAIM_NOSWAP;
} else
@@ -2144,6 +2145,8 @@ void mem_cgroup_split_huge_fixup(struct page *head, struct page *tail)
struct page_cgroup *tail_pc = lookup_page_cgroup(tail);
unsigned long flags;
+ if (mem_cgroup_disabled())
+ return;
/*
* We have no races with charge/uncharge but will have races with
* page state accounting.
@@ -2233,7 +2236,12 @@ static int mem_cgroup_move_account(struct page_cgroup *pc,
{
int ret = -EINVAL;
unsigned long flags;
-
+ /*
+ * The page is isolated from LRU. So, collapse function
+ * will not handle this page. But page splitting can happen.
+ * Do this check under compound_page_lock(). The caller should
+ * hold it.
+ */
if ((charge_size > PAGE_SIZE) && !PageTransHuge(pc->page))
return -EBUSY;
@@ -2265,7 +2273,7 @@ static int mem_cgroup_move_parent(struct page_cgroup *pc,
struct cgroup *cg = child->css.cgroup;
struct cgroup *pcg = cg->parent;
struct mem_cgroup *parent;
- int charge = PAGE_SIZE;
+ int page_size = PAGE_SIZE;
unsigned long flags;
int ret;
@@ -2278,23 +2286,26 @@ static int mem_cgroup_move_parent(struct page_cgroup *pc,
goto out;
if (isolate_lru_page(page))
goto put;
- /* The page is isolated from LRU and we have no race with splitting */
- charge = PAGE_SIZE << compound_order(page);
+
+ if (PageTransHuge(page))
+ page_size = HPAGE_SIZE;
parent = mem_cgroup_from_cont(pcg);
- ret = __mem_cgroup_try_charge(NULL, gfp_mask, &parent, false, charge);
+ ret = __mem_cgroup_try_charge(NULL, gfp_mask,
+ &parent, false, page_size);
if (ret || !parent)
goto put_back;
- if (charge > PAGE_SIZE)
+ if (page_size > PAGE_SIZE)
flags = compound_lock_irqsave(page);
- ret = mem_cgroup_move_account(pc, child, parent, true, charge);
+ ret = mem_cgroup_move_account(pc, child, parent, true, page_size);
if (ret)
- mem_cgroup_cancel_charge(parent, charge);
-put_back:
- if (charge > PAGE_SIZE)
+ mem_cgroup_cancel_charge(parent, page_size);
+
+ if (page_size > PAGE_SIZE)
compound_unlock_irqrestore(page, flags);
+put_back:
putback_lru_page(page);
put:
put_page(page);
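
The two memcontrol.c fixes above share one pattern: undo exactly what was done on the failing path and nothing more. The res_counter_uncharge() added in __mem_cgroup_do_charge() rolls back the res charge when the subsequent memsw charge fails, and the relocated put_back: label ensures compound_unlock_irqrestore() only runs on paths that actually took the compound lock. Below is a minimal sketch of the rollback pattern; "counter", "counter_charge" and "charge_both" are hypothetical stand-ins, not the real res_counter API.

/*
 * Illustrative sketch only: hypothetical counter helpers standing in for
 * the res_counter machinery; not the memcg code itself.
 */
#include <stdbool.h>

struct counter {
	long used;
	long limit;
};

static bool counter_charge(struct counter *c, long n)
{
	if (c->used + n > c->limit)
		return false;		/* over limit, nothing was charged */
	c->used += n;
	return true;
}

static void counter_uncharge(struct counter *c, long n)
{
	c->used -= n;
}

/*
 * Charge two counters as a unit.  If the second charge fails, the first
 * one must be rolled back, otherwise its usage count leaks -- the bug the
 * added res_counter_uncharge() call addresses.
 */
static bool charge_both(struct counter *res, struct counter *memsw, long n)
{
	if (!counter_charge(res, n))
		return false;
	if (!counter_charge(memsw, n)) {
		counter_uncharge(res, n);	/* roll back the partial charge */
		return false;
	}
	return true;
}
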
diff --git a/mm/migrate.c b/mm/migrate.c
index 46fe8cc13d6..9f29a3b7aac 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -888,7 +888,7 @@ out:
* are movable anymore because to has become empty
* or no retryable pages exist anymore.
* Caller should call putback_lru_pages to return pages to the LRU
- * or free list.
+ * or free list only if ret != 0.
*
* Return: Number of pages not migrated or error code.
*/
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 90c1439549f..a873e61e312 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1088,8 +1088,10 @@ static void drain_pages(unsigned int cpu)
pset = per_cpu_ptr(zone->pageset, cpu);
pcp = &pset->pcp;
- free_pcppages_bulk(zone, pcp->count, pcp);
- pcp->count = 0;
+ if (pcp->count) {
+ free_pcppages_bulk(zone, pcp->count, pcp);
+ pcp->count = 0;
+ }
local_irq_restore(flags);
}
}
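
The drain_pages() hunk above is a small optimisation: when the per-cpu page list is already empty there is nothing to free, so skipping free_pcppages_bulk() avoids taking the zone lock for no work. A rough sketch of the idea, using a made-up pcp structure and a pthread mutex rather than the kernel's per_cpu_pages and zone->lock:

/*
 * Illustrative sketch: "pcp_list", "bulk_free" and "zone_lock" are
 * hypothetical; the point is only the cheap emptiness check placed in
 * front of the locked path.
 */
#include <pthread.h>

struct pcp_list {
	int count;			/* pages cached on this CPU */
	/* ... page lists ... */
};

static pthread_mutex_t zone_lock = PTHREAD_MUTEX_INITIALIZER;

static void bulk_free(struct pcp_list *pcp, int count)
{
	pthread_mutex_lock(&zone_lock);	/* the expensive, contended part */
	/* ... return 'count' pages to the free lists ... */
	(void)pcp;
	(void)count;
	pthread_mutex_unlock(&zone_lock);
}

static void drain(struct pcp_list *pcp)
{
	if (pcp->count) {		/* cheap check, no lock needed */
		bulk_free(pcp, pcp->count);
		pcp->count = 0;
	}
}
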
@@ -2034,6 +2036,14 @@ restart:
*/
alloc_flags = gfp_to_alloc_flags(gfp_mask);
+ /*
+ * Find the true preferred zone if the allocation is unconstrained by
+ * cpusets.
+ */
+ if (!(alloc_flags & ALLOC_CPUSET) && !nodemask)
+ first_zones_zonelist(zonelist, high_zoneidx, NULL,
+ &preferred_zone);
+
/* This is the last chance, in general, before the goto nopage. */
page = get_page_from_freelist(gfp_mask, nodemask, order, zonelist,
high_zoneidx, alloc_flags & ~ALLOC_NO_WATERMARKS,
@@ -2192,7 +2202,9 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
get_mems_allowed();
/* The preferred zone is used for statistics later */
- first_zones_zonelist(zonelist, high_zoneidx, nodemask, &preferred_zone);
+ first_zones_zonelist(zonelist, high_zoneidx,
+ nodemask ? : &cpuset_current_mems_allowed,
+ &preferred_zone);
if (!preferred_zone) {
put_mems_allowed();
return NULL;
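
Both __alloc_pages_nodemask() hunks revolve around picking a sensible preferred_zone when the caller did not supply a nodemask: the slowpath recomputes it without cpuset constraints, and the fastpath falls back to cpuset_current_mems_allowed using GCC's conditional with an omitted middle operand ("a ? : b"). A small GNU C sketch of that fallback idiom follows; the mask type and names are hypothetical, only the operator usage mirrors the hunk above.

/*
 * Hypothetical stand-ins for nodemask_t and cpuset_current_mems_allowed;
 * illustrates only the "a ? : b" GNU C fallback (build with gcc or clang).
 */
#include <stdio.h>

typedef struct { unsigned long bits; } mask_t;

static mask_t default_mask = { .bits = ~0UL };	/* "all nodes allowed" */

static const mask_t *pick_mask(const mask_t *caller_mask)
{
	/*
	 * GNU extension: "x ? : y" evaluates x once and yields it when
	 * non-zero (non-NULL here), otherwise yields y.  Equivalent to
	 * caller_mask ? caller_mask : &default_mask.
	 */
	return caller_mask ? : &default_mask;
}

int main(void)
{
	mask_t m = { .bits = 0x3 };

	printf("%lx\n", pick_mask(&m)->bits);	/* caller-supplied mask */
	printf("%lx\n", pick_mask(NULL)->bits);	/* falls back to default */
	return 0;
}
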
diff --git a/mm/pgtable-generic.c b/mm/pgtable-generic.c
index 0369f5b3ba1..eb663fb533e 100644
--- a/mm/pgtable-generic.c
+++ b/mm/pgtable-generic.c
@@ -6,6 +6,7 @@
* Copyright (C) 2010 Linus Torvalds
*/
+#include <linux/pagemap.h>
#include <asm/tlb.h>
#include <asm-generic/pgtable.h>
diff --git a/mm/vmscan.c b/mm/vmscan.c
index f5d90dedebb..148c6e630df 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2083,7 +2083,8 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
struct zone *preferred_zone;
first_zones_zonelist(zonelist, gfp_zone(sc->gfp_mask),
- NULL, &preferred_zone);
+ &cpuset_current_mems_allowed,
+ &preferred_zone);
wait_iff_congested(preferred_zone, BLK_RW_ASYNC, HZ/10);
}
}