Diffstat (limited to 'mm/memcontrol.c')
-rw-r--r--  mm/memcontrol.c  93
1 file changed, 53 insertions, 40 deletions
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 7973b5221fb..0f711c213d2 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -811,10 +811,12 @@ int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem)
* enabled in "curr" and "curr" is a child of "mem" in *cgroup*
* hierarchy (even if use_hierarchy is disabled in "mem").
*/
+ rcu_read_lock();
if (mem->use_hierarchy)
ret = css_is_ancestor(&curr->css, &mem->css);
else
ret = (curr == mem);
+ rcu_read_unlock();
css_put(&curr->css);
return ret;
}
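
This is the first of several hunks in this patch (the swap_cgroup_record(), mem_cgroup_move_swap_account() and is_target_pte_for_mc() hunks below follow the same pattern) that wrap css_id()/css_is_ancestor() in an RCU read-side critical section, since the id lookup dereferences RCU-protected data. A minimal standalone sketch of the pattern follows; the lock primitives and css_id() here are stubs so the file compiles on its own, not the kernel implementations.

#include <stdbool.h>
#include <stdio.h>

/* Stubbed RCU primitives: in the kernel these delimit a read-side
 * critical section during which RCU-protected data stays valid. */
static void rcu_read_lock(void)   { }
static void rcu_read_unlock(void) { }

struct css { int id; };

/* Stand-in for css_id(): the real one rcu_dereference()s the id
 * structure, which is why callers must hold the RCU read lock. */
static int css_id(struct css *css) { return css->id; }

static bool same_id(struct css *a, struct css *b)
{
        int id_a, id_b;

        rcu_read_lock();        /* both lookups in one read-side section */
        id_a = css_id(a);
        id_b = css_id(b);
        rcu_read_unlock();

        return id_a == id_b;
}

int main(void)
{
        struct css a = { .id = 1 }, b = { .id = 1 };

        printf("%d\n", same_id(&a, &b));        /* prints 1 */
        return 0;
}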
@@ -1359,16 +1361,19 @@ void mem_cgroup_update_file_mapped(struct page *page, int val)
lock_page_cgroup(pc);
mem = pc->mem_cgroup;
- if (!mem)
- goto done;
-
- if (!PageCgroupUsed(pc))
+ if (!mem || !PageCgroupUsed(pc))
goto done;
/*
* Preemption is already disabled. We can use __this_cpu_xxx
*/
- __this_cpu_add(mem->stat->count[MEM_CGROUP_STAT_FILE_MAPPED], val);
+ if (val > 0) {
+ __this_cpu_inc(mem->stat->count[MEM_CGROUP_STAT_FILE_MAPPED]);
+ SetPageCgroupFileMapped(pc);
+ } else {
+ __this_cpu_dec(mem->stat->count[MEM_CGROUP_STAT_FILE_MAPPED]);
+ ClearPageCgroupFileMapped(pc);
+ }
done:
unlock_page_cgroup(pc);
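
The hunk above replaces a raw __this_cpu_add() with paired inc/dec calls that also set or clear a FILE_MAPPED flag on the page_cgroup. The payoff shows up in the __mem_cgroup_move_account() hunk that follows: when a page moves between cgroups, the flag recorded at accounting time decides whether the stat moves too, instead of re-deriving the answer from page_mapped() && !PageAnon(). A compilable userspace sketch of the idea, with hypothetical names and the per-CPU counter reduced to a plain long:

#include <stdio.h>

enum { REC_FILE_MAPPED = 1 << 0 };

struct stats    { long file_mapped; };          /* kernel: per-CPU counters */
struct page_rec { unsigned long flags; struct stats *owner; };

/* Accounting path: update the counter and remember, on the page
 * record itself, whether it is currently counted. */
static void update_file_mapped(struct page_rec *pr, int val)
{
        if (val > 0) {
                pr->owner->file_mapped++;       /* kernel: __this_cpu_inc() */
                pr->flags |= REC_FILE_MAPPED;
        } else {
                pr->owner->file_mapped--;       /* kernel: __this_cpu_dec() */
                pr->flags &= ~REC_FILE_MAPPED;
        }
}

/* Move path: the recorded flag, not the current page state, decides
 * whether the stat follows the page to the new owner. */
static void move_account(struct page_rec *pr,
                         struct stats *from, struct stats *to)
{
        if (pr->flags & REC_FILE_MAPPED) {
                from->file_mapped--;
                to->file_mapped++;
        }
        pr->owner = to;
}

int main(void)
{
        struct stats a = { 0 }, b = { 0 };
        struct page_rec pr = { 0, &a };

        update_file_mapped(&pr, 1);             /* page becomes file-mapped */
        move_account(&pr, &a, &b);
        printf("a=%ld b=%ld\n", a.file_mapped, b.file_mapped);  /* a=0 b=1 */
        return 0;
}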
@@ -1801,16 +1806,13 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *mem,
static void __mem_cgroup_move_account(struct page_cgroup *pc,
struct mem_cgroup *from, struct mem_cgroup *to, bool uncharge)
{
- struct page *page;
-
VM_BUG_ON(from == to);
VM_BUG_ON(PageLRU(pc->page));
VM_BUG_ON(!PageCgroupLocked(pc));
VM_BUG_ON(!PageCgroupUsed(pc));
VM_BUG_ON(pc->mem_cgroup != from);
- page = pc->page;
- if (page_mapped(page) && !PageAnon(page)) {
+ if (PageCgroupFileMapped(pc)) {
/* Update mapped_file data for mem_cgroup */
preempt_disable();
__this_cpu_dec(from->stat->count[MEM_CGROUP_STAT_FILE_MAPPED]);
@@ -2312,7 +2314,9 @@ mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent, bool swapout)
/* record memcg information */
if (do_swap_account && swapout && memcg) {
+ rcu_read_lock();
swap_cgroup_record(ent, css_id(&memcg->css));
+ rcu_read_unlock();
mem_cgroup_get(memcg);
}
if (swapout && memcg)
@@ -2369,8 +2373,10 @@ static int mem_cgroup_move_swap_account(swp_entry_t entry,
{
unsigned short old_id, new_id;
+ rcu_read_lock();
old_id = css_id(&from->css);
new_id = css_id(&to->css);
+ rcu_read_unlock();
if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) {
mem_cgroup_swap_statistics(from, false);
@@ -2429,11 +2435,11 @@ int mem_cgroup_prepare_migration(struct page *page, struct mem_cgroup **ptr)
}
unlock_page_cgroup(pc);
+ *ptr = mem;
if (mem) {
- ret = __mem_cgroup_try_charge(NULL, GFP_KERNEL, &mem, false);
+ ret = __mem_cgroup_try_charge(NULL, GFP_KERNEL, ptr, false);
css_put(&mem->css);
}
- *ptr = mem;
return ret;
}
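
The reordering in this hunk matters because __mem_cgroup_try_charge() can rewrite the mem_cgroup pointer it is handed through its double-pointer argument (clearing it on failure, for instance), so assigning *ptr only after the call could leave the caller with a stale value. Writing *ptr first and passing ptr into the call lets any rewrite land where the caller will see it. A small sketch of this out-parameter pattern, all names hypothetical:

#include <stdio.h>

struct group { const char *name; };

static struct group root = { "root" };

/* Stand-in for a charge function that may substitute another group
 * through the pointer it was given. */
static int try_charge(struct group **gp)
{
        if (!(*gp)->name)
                *gp = &root;            /* substitution the caller must see */
        return 0;
}

static int prepare(struct group *g, struct group **out)
{
        int ret = 0;

        *out = g;                       /* publish before the call ...    */
        if (g)
                ret = try_charge(out);  /* ... so any rewrite lands in *out */
        return ret;
}

int main(void)
{
        struct group anon = { NULL };   /* will be replaced by &root */
        struct group *seen;

        prepare(&anon, &seen);
        printf("%s\n", seen->name);     /* prints "root" */
        return 0;
}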
@@ -3691,8 +3697,10 @@ static struct mem_cgroup *mem_cgroup_alloc(void)
else
mem = vmalloc(size);
- if (mem)
- memset(mem, 0, size);
+ if (!mem)
+ return NULL;
+
+ memset(mem, 0, size);
mem->stat = alloc_percpu(struct mem_cgroup_stat_cpu);
if (!mem->stat) {
if (size < PAGE_SIZE)
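
The mem_cgroup_alloc() hunk above converts "if (mem) memset(mem, 0, size);" into an early return: in the old shape a failed kmalloc()/vmalloc() skipped the memset but still fell through to the mem->stat assignment, dereferencing NULL. A standalone sketch of the corrected allocate-and-zero shape, with plain malloc() standing in for both kernel allocators:

#include <stdlib.h>
#include <string.h>

#define SMALL_LIMIT 4096                /* kernel: PAGE_SIZE threshold */

static void *alloc_zeroed(size_t size)
{
        void *p;

        if (size < SMALL_LIMIT)
                p = malloc(size);       /* kernel: kmalloc(size, GFP_KERNEL) */
        else
                p = malloc(size);       /* kernel: vmalloc(size) */
        if (!p)
                return NULL;            /* fail early: nothing below runs */

        memset(p, 0, size);             /* safe: p is known non-NULL here */
        return p;
}

int main(void)
{
        void *obj = alloc_zeroed(128);

        free(obj);
        return 0;
}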
@@ -3946,28 +3954,6 @@ one_by_one:
}
return ret;
}
-#else /* !CONFIG_MMU */
-static int mem_cgroup_can_attach(struct cgroup_subsys *ss,
- struct cgroup *cgroup,
- struct task_struct *p,
- bool threadgroup)
-{
- return 0;
-}
-static void mem_cgroup_cancel_attach(struct cgroup_subsys *ss,
- struct cgroup *cgroup,
- struct task_struct *p,
- bool threadgroup)
-{
-}
-static void mem_cgroup_move_task(struct cgroup_subsys *ss,
- struct cgroup *cont,
- struct cgroup *old_cont,
- struct task_struct *p,
- bool threadgroup)
-{
-}
-#endif
/**
* is_target_pte_for_mc - check a pte whether it is valid for move charge
@@ -4058,11 +4044,16 @@ static int is_target_pte_for_mc(struct vm_area_struct *vma,
put_page(page);
}
/* fall through */
- if (ent.val && do_swap_account && !ret &&
- css_id(&mc.from->css) == lookup_swap_cgroup(ent)) {
- ret = MC_TARGET_SWAP;
- if (target)
- target->ent = ent;
+ if (ent.val && do_swap_account && !ret) {
+ unsigned short id;
+ rcu_read_lock();
+ id = css_id(&mc.from->css);
+ rcu_read_unlock();
+ if (id == lookup_swap_cgroup(ent)) {
+ ret = MC_TARGET_SWAP;
+ if (target)
+ target->ent = ent;
+ }
}
return ret;
}
@@ -4330,6 +4321,28 @@ static void mem_cgroup_move_task(struct cgroup_subsys *ss,
}
mem_cgroup_clear_mc();
}
+#else /* !CONFIG_MMU */
+static int mem_cgroup_can_attach(struct cgroup_subsys *ss,
+ struct cgroup *cgroup,
+ struct task_struct *p,
+ bool threadgroup)
+{
+ return 0;
+}
+static void mem_cgroup_cancel_attach(struct cgroup_subsys *ss,
+ struct cgroup *cgroup,
+ struct task_struct *p,
+ bool threadgroup)
+{
+}
+static void mem_cgroup_move_task(struct cgroup_subsys *ss,
+ struct cgroup *cont,
+ struct cgroup *old_cont,
+ struct task_struct *p,
+ bool threadgroup)
+{
+}
+#endif
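
These are the same three !CONFIG_MMU stubs that an earlier hunk deleted; the patch moves them here so the #else branch sits directly after the real CONFIG_MMU implementations it replaces, keeping every handler defined before mem_cgroup_subsys references it. The general shape, no-op handlers compiled in when a feature is configured out so one unconditional ops table works in both configurations, looks roughly like this hypothetical sketch:

/* Build with -DHAVE_FEATURE to select the real handler. */
#ifdef HAVE_FEATURE
static int can_attach(void)
{
        /* real work would go here */
        return 0;
}
#else /* !HAVE_FEATURE */
static int can_attach(void)
{
        return 0;               /* stub: attaching always succeeds */
}
#endif

struct subsys_ops {
        int (*can_attach)(void);
};

/* One unconditional table: valid in both configurations. */
static const struct subsys_ops ops = {
        .can_attach = can_attach,
};

int main(void) { return ops.can_attach(); }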
struct cgroup_subsys mem_cgroup_subsys = {
.name = "memory",