author    Raghavendra K T <raghavendra.kt@linux.vnet.ibm.com>  2011-11-02 13:38:15 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>       2011-11-02 16:06:59 -0700
commit    c0ff4b8540a5c158b8e5bafb7d767298b67b0b92 (patch)
tree      a47a2bcd0b7b80056cde7ba6b1263aae78f77212 /mm
parent    ff7ee93f47151e23601856e7eb5510babf956571 (diff)
memcg: rename mem variable to memcg
The memcg code sometimes uses "struct mem_cgroup *mem" and sometimes uses
"struct mem_cgroup *memcg". Rename all "mem" variables to "memcg" in the
source file.

Signed-off-by: Raghavendra K T <raghavendra.kt@linux.vnet.ibm.com>
Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Acked-by: Michal Hocko <mhocko@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
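To illustrate the shape of the change, here is a before/after excerpt taken directly from one of the hunks in this patch (mem_cgroup_css()); it is only an illustration of the rename, not new code:

    /* Before: the parameter is named "mem" */
    struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *mem)
    {
    	return &mem->css;
    }

    /* After: the same function with the parameter renamed to "memcg" */
    struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *memcg)
    {
    	return &memcg->css;
    }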
Diffstat (limited to 'mm')
-rw-r--r--  mm/memcontrol.c | 930
1 file changed, 467 insertions(+), 463 deletions(-)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 2d5755544af..9e38abdbfd9 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -201,8 +201,8 @@ struct mem_cgroup_eventfd_list {
struct eventfd_ctx *eventfd;
};
-static void mem_cgroup_threshold(struct mem_cgroup *mem);
-static void mem_cgroup_oom_notify(struct mem_cgroup *mem);
+static void mem_cgroup_threshold(struct mem_cgroup *memcg);
+static void mem_cgroup_oom_notify(struct mem_cgroup *memcg);
/*
* The memory controller data structure. The memory controller controls both
@@ -362,29 +362,29 @@ enum charge_type {
#define MEM_CGROUP_RECLAIM_SOFT_BIT 0x2
#define MEM_CGROUP_RECLAIM_SOFT (1 << MEM_CGROUP_RECLAIM_SOFT_BIT)
-static void mem_cgroup_get(struct mem_cgroup *mem);
-static void mem_cgroup_put(struct mem_cgroup *mem);
-static struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *mem);
-static void drain_all_stock_async(struct mem_cgroup *mem);
+static void mem_cgroup_get(struct mem_cgroup *memcg);
+static void mem_cgroup_put(struct mem_cgroup *memcg);
+static struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg);
+static void drain_all_stock_async(struct mem_cgroup *memcg);
static struct mem_cgroup_per_zone *
-mem_cgroup_zoneinfo(struct mem_cgroup *mem, int nid, int zid)
+mem_cgroup_zoneinfo(struct mem_cgroup *memcg, int nid, int zid)
{
- return &mem->info.nodeinfo[nid]->zoneinfo[zid];
+ return &memcg->info.nodeinfo[nid]->zoneinfo[zid];
}
-struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *mem)
+struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *memcg)
{
- return &mem->css;
+ return &memcg->css;
}
static struct mem_cgroup_per_zone *
-page_cgroup_zoneinfo(struct mem_cgroup *mem, struct page *page)
+page_cgroup_zoneinfo(struct mem_cgroup *memcg, struct page *page)
{
int nid = page_to_nid(page);
int zid = page_zonenum(page);
- return mem_cgroup_zoneinfo(mem, nid, zid);
+ return mem_cgroup_zoneinfo(memcg, nid, zid);
}
static struct mem_cgroup_tree_per_zone *
@@ -403,7 +403,7 @@ soft_limit_tree_from_page(struct page *page)
}
static void
-__mem_cgroup_insert_exceeded(struct mem_cgroup *mem,
+__mem_cgroup_insert_exceeded(struct mem_cgroup *memcg,
struct mem_cgroup_per_zone *mz,
struct mem_cgroup_tree_per_zone *mctz,
unsigned long long new_usage_in_excess)
@@ -437,7 +437,7 @@ __mem_cgroup_insert_exceeded(struct mem_cgroup *mem,
}
static void
-__mem_cgroup_remove_exceeded(struct mem_cgroup *mem,
+__mem_cgroup_remove_exceeded(struct mem_cgroup *memcg,
struct mem_cgroup_per_zone *mz,
struct mem_cgroup_tree_per_zone *mctz)
{
@@ -448,17 +448,17 @@ __mem_cgroup_remove_exceeded(struct mem_cgroup *mem,
}
static void
-mem_cgroup_remove_exceeded(struct mem_cgroup *mem,
+mem_cgroup_remove_exceeded(struct mem_cgroup *memcg,
struct mem_cgroup_per_zone *mz,
struct mem_cgroup_tree_per_zone *mctz)
{
spin_lock(&mctz->lock);
- __mem_cgroup_remove_exceeded(mem, mz, mctz);
+ __mem_cgroup_remove_exceeded(memcg, mz, mctz);
spin_unlock(&mctz->lock);
}
-static void mem_cgroup_update_tree(struct mem_cgroup *mem, struct page *page)
+static void mem_cgroup_update_tree(struct mem_cgroup *memcg, struct page *page)
{
unsigned long long excess;
struct mem_cgroup_per_zone *mz;
@@ -471,9 +471,9 @@ static void mem_cgroup_update_tree(struct mem_cgroup *mem, struct page *page)
* Necessary to update all ancestors when hierarchy is used.
* because their event counter is not touched.
*/
- for (; mem; mem = parent_mem_cgroup(mem)) {
- mz = mem_cgroup_zoneinfo(mem, nid, zid);
- excess = res_counter_soft_limit_excess(&mem->res);
+ for (; memcg; memcg = parent_mem_cgroup(memcg)) {
+ mz = mem_cgroup_zoneinfo(memcg, nid, zid);
+ excess = res_counter_soft_limit_excess(&memcg->res);
/*
* We have to update the tree if mz is on RB-tree or
* mem is over its softlimit.
@@ -482,18 +482,18 @@ static void mem_cgroup_update_tree(struct mem_cgroup *mem, struct page *page)
spin_lock(&mctz->lock);
/* if on-tree, remove it */
if (mz->on_tree)
- __mem_cgroup_remove_exceeded(mem, mz, mctz);
+ __mem_cgroup_remove_exceeded(memcg, mz, mctz);
/*
* Insert again. mz->usage_in_excess will be updated.
* If excess is 0, no tree ops.
*/
- __mem_cgroup_insert_exceeded(mem, mz, mctz, excess);
+ __mem_cgroup_insert_exceeded(memcg, mz, mctz, excess);
spin_unlock(&mctz->lock);
}
}
}
-static void mem_cgroup_remove_from_trees(struct mem_cgroup *mem)
+static void mem_cgroup_remove_from_trees(struct mem_cgroup *memcg)
{
int node, zone;
struct mem_cgroup_per_zone *mz;
@@ -501,9 +501,9 @@ static void mem_cgroup_remove_from_trees(struct mem_cgroup *mem)
for_each_node_state(node, N_POSSIBLE) {
for (zone = 0; zone < MAX_NR_ZONES; zone++) {
- mz = mem_cgroup_zoneinfo(mem, node, zone);
+ mz = mem_cgroup_zoneinfo(memcg, node, zone);
mctz = soft_limit_tree_node_zone(node, zone);
- mem_cgroup_remove_exceeded(mem, mz, mctz);
+ mem_cgroup_remove_exceeded(memcg, mz, mctz);
}
}
}
@@ -564,7 +564,7 @@ mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
* common workload, threashold and synchonization as vmstat[] should be
* implemented.
*/
-static long mem_cgroup_read_stat(struct mem_cgroup *mem,
+static long mem_cgroup_read_stat(struct mem_cgroup *memcg,
enum mem_cgroup_stat_index idx)
{
long val = 0;
@@ -572,81 +572,83 @@ static long mem_cgroup_read_stat(struct mem_cgroup *mem,
get_online_cpus();
for_each_online_cpu(cpu)
- val += per_cpu(mem->stat->count[idx], cpu);
+ val += per_cpu(memcg->stat->count[idx], cpu);
#ifdef CONFIG_HOTPLUG_CPU
- spin_lock(&mem->pcp_counter_lock);
- val += mem->nocpu_base.count[idx];
- spin_unlock(&mem->pcp_counter_lock);
+ spin_lock(&memcg->pcp_counter_lock);
+ val += memcg->nocpu_base.count[idx];
+ spin_unlock(&memcg->pcp_counter_lock);
#endif
put_online_cpus();
return val;
}
-static void mem_cgroup_swap_statistics(struct mem_cgroup *mem,
+static void mem_cgroup_swap_statistics(struct mem_cgroup *memcg,
bool charge)
{
int val = (charge) ? 1 : -1;
- this_cpu_add(mem->stat->count[MEM_CGROUP_STAT_SWAPOUT], val);
+ this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_SWAPOUT], val);
}
-void mem_cgroup_pgfault(struct mem_cgroup *mem, int val)
+void mem_cgroup_pgfault(struct mem_cgroup *memcg, int val)
{
- this_cpu_add(mem->stat->events[MEM_CGROUP_EVENTS_PGFAULT], val);
+ this_cpu_add(memcg->stat->events[MEM_CGROUP_EVENTS_PGFAULT], val);
}
-void mem_cgroup_pgmajfault(struct mem_cgroup *mem, int val)
+void mem_cgroup_pgmajfault(struct mem_cgroup *memcg, int val)
{
- this_cpu_add(mem->stat->events[MEM_CGROUP_EVENTS_PGMAJFAULT], val);
+ this_cpu_add(memcg->stat->events[MEM_CGROUP_EVENTS_PGMAJFAULT], val);
}
-static unsigned long mem_cgroup_read_events(struct mem_cgroup *mem,
+static unsigned long mem_cgroup_read_events(struct mem_cgroup *memcg,
enum mem_cgroup_events_index idx)
{
unsigned long val = 0;
int cpu;
for_each_online_cpu(cpu)
- val += per_cpu(mem->stat->events[idx], cpu);
+ val += per_cpu(memcg->stat->events[idx], cpu);
#ifdef CONFIG_HOTPLUG_CPU
- spin_lock(&mem->pcp_counter_lock);
- val += mem->nocpu_base.events[idx];
- spin_unlock(&mem->pcp_counter_lock);
+ spin_lock(&memcg->pcp_counter_lock);
+ val += memcg->nocpu_base.events[idx];
+ spin_unlock(&memcg->pcp_counter_lock);
#endif
return val;
}
-static void mem_cgroup_charge_statistics(struct mem_cgroup *mem,
+static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
bool file, int nr_pages)
{
preempt_disable();
if (file)
- __this_cpu_add(mem->stat->count[MEM_CGROUP_STAT_CACHE], nr_pages);
+ __this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_CACHE],
+ nr_pages);
else
- __this_cpu_add(mem->stat->count[MEM_CGROUP_STAT_RSS], nr_pages);
+ __this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_RSS],
+ nr_pages);
/* pagein of a big page is an event. So, ignore page size */
if (nr_pages > 0)
- __this_cpu_inc(mem->stat->events[MEM_CGROUP_EVENTS_PGPGIN]);
+ __this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGIN]);
else {
- __this_cpu_inc(mem->stat->events[MEM_CGROUP_EVENTS_PGPGOUT]);
+ __this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGOUT]);
nr_pages = -nr_pages; /* for event */
}
- __this_cpu_add(mem->stat->events[MEM_CGROUP_EVENTS_COUNT], nr_pages);
+ __this_cpu_add(memcg->stat->events[MEM_CGROUP_EVENTS_COUNT], nr_pages);
preempt_enable();
}
unsigned long
-mem_cgroup_zone_nr_lru_pages(struct mem_cgroup *mem, int nid, int zid,
+mem_cgroup_zone_nr_lru_pages(struct mem_cgroup *memcg, int nid, int zid,
unsigned int lru_mask)
{
struct mem_cgroup_per_zone *mz;
enum lru_list l;
unsigned long ret = 0;
- mz = mem_cgroup_zoneinfo(mem, nid, zid);
+ mz = mem_cgroup_zoneinfo(memcg, nid, zid);
for_each_lru(l) {
if (BIT(l) & lru_mask)
@@ -656,44 +658,45 @@ mem_cgroup_zone_nr_lru_pages(struct mem_cgroup *mem, int nid, int zid,
}
static unsigned long
-mem_cgroup_node_nr_lru_pages(struct mem_cgroup *mem,
+mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
int nid, unsigned int lru_mask)
{
u64 total = 0;
int zid;
for (zid = 0; zid < MAX_NR_ZONES; zid++)
- total += mem_cgroup_zone_nr_lru_pages(mem, nid, zid, lru_mask);
+ total += mem_cgroup_zone_nr_lru_pages(memcg,
+ nid, zid, lru_mask);
return total;
}
-static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *mem,
+static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *memcg,
unsigned int lru_mask)
{
int nid;
u64 total = 0;
for_each_node_state(nid, N_HIGH_MEMORY)
- total += mem_cgroup_node_nr_lru_pages(mem, nid, lru_mask);
+ total += mem_cgroup_node_nr_lru_pages(memcg, nid, lru_mask);
return total;
}
-static bool __memcg_event_check(struct mem_cgroup *mem, int target)
+static bool __memcg_event_check(struct mem_cgroup *memcg, int target)
{
unsigned long val, next;
- val = this_cpu_read(mem->stat->events[MEM_CGROUP_EVENTS_COUNT]);
- next = this_cpu_read(mem->stat->targets[target]);
+ val = this_cpu_read(memcg->stat->events[MEM_CGROUP_EVENTS_COUNT]);
+ next = this_cpu_read(memcg->stat->targets[target]);
/* from time_after() in jiffies.h */
return ((long)next - (long)val < 0);
}
-static void __mem_cgroup_target_update(struct mem_cgroup *mem, int target)
+static void __mem_cgroup_target_update(struct mem_cgroup *memcg, int target)
{
unsigned long val, next;
- val = this_cpu_read(mem->stat->events[MEM_CGROUP_EVENTS_COUNT]);
+ val = this_cpu_read(memcg->stat->events[MEM_CGROUP_EVENTS_COUNT]);
switch (target) {
case MEM_CGROUP_TARGET_THRESH:
@@ -709,30 +712,30 @@ static void __mem_cgroup_target_update(struct mem_cgroup *mem, int target)
return;
}
- this_cpu_write(mem->stat->targets[target], next);
+ this_cpu_write(memcg->stat->targets[target], next);
}
/*
* Check events in order.
*
*/
-static void memcg_check_events(struct mem_cgroup *mem, struct page *page)
+static void memcg_check_events(struct mem_cgroup *memcg, struct page *page)
{
/* threshold event is triggered in finer grain than soft limit */
- if (unlikely(__memcg_event_check(mem, MEM_CGROUP_TARGET_THRESH))) {
- mem_cgroup_threshold(mem);
- __mem_cgroup_target_update(mem, MEM_CGROUP_TARGET_THRESH);
- if (unlikely(__memcg_event_check(mem,
+ if (unlikely(__memcg_event_check(memcg, MEM_CGROUP_TARGET_THRESH))) {
+ mem_cgroup_threshold(memcg);
+ __mem_cgroup_target_update(memcg, MEM_CGROUP_TARGET_THRESH);
+ if (unlikely(__memcg_event_check(memcg,
MEM_CGROUP_TARGET_SOFTLIMIT))) {
- mem_cgroup_update_tree(mem, page);
- __mem_cgroup_target_update(mem,
+ mem_cgroup_update_tree(memcg, page);
+ __mem_cgroup_target_update(memcg,
MEM_CGROUP_TARGET_SOFTLIMIT);
}
#if MAX_NUMNODES > 1
- if (unlikely(__memcg_event_check(mem,
+ if (unlikely(__memcg_event_check(memcg,
MEM_CGROUP_TARGET_NUMAINFO))) {
- atomic_inc(&mem->numainfo_events);
- __mem_cgroup_target_update(mem,
+ atomic_inc(&memcg->numainfo_events);
+ __mem_cgroup_target_update(memcg,
MEM_CGROUP_TARGET_NUMAINFO);
}
#endif
@@ -762,7 +765,7 @@ struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm)
{
- struct mem_cgroup *mem = NULL;
+ struct mem_cgroup *memcg = NULL;
if (!mm)
return NULL;
@@ -773,25 +776,25 @@ struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm)
*/
rcu_read_lock();
do {
- mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
- if (unlikely(!mem))
+ memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
+ if (unlikely(!memcg))
break;
- } while (!css_tryget(&mem->css));
+ } while (!css_tryget(&memcg->css));
rcu_read_unlock();
- return mem;
+ return memcg;
}
/* The caller has to guarantee "mem" exists before calling this */
-static struct mem_cgroup *mem_cgroup_start_loop(struct mem_cgroup *mem)
+static struct mem_cgroup *mem_cgroup_start_loop(struct mem_cgroup *memcg)
{
struct cgroup_subsys_state *css;
int found;
- if (!mem) /* ROOT cgroup has the smallest ID */
+ if (!memcg) /* ROOT cgroup has the smallest ID */
return root_mem_cgroup; /*css_put/get against root is ignored*/
- if (!mem->use_hierarchy) {
- if (css_tryget(&mem->css))
- return mem;
+ if (!memcg->use_hierarchy) {
+ if (css_tryget(&memcg->css))
+ return memcg;
return NULL;
}
rcu_read_lock();
@@ -799,13 +802,13 @@ static struct mem_cgroup *mem_cgroup_start_loop(struct mem_cgroup *mem)
* searching a memory cgroup which has the smallest ID under given
* ROOT cgroup. (ID >= 1)
*/
- css = css_get_next(&mem_cgroup_subsys, 1, &mem->css, &found);
+ css = css_get_next(&mem_cgroup_subsys, 1, &memcg->css, &found);
if (css && css_tryget(css))
- mem = container_of(css, struct mem_cgroup, css);
+ memcg = container_of(css, struct mem_cgroup, css);
else
- mem = NULL;
+ memcg = NULL;
rcu_read_unlock();
- return mem;
+ return memcg;
}
static struct mem_cgroup *mem_cgroup_get_next(struct mem_cgroup *iter,
@@ -859,29 +862,29 @@ static struct mem_cgroup *mem_cgroup_get_next(struct mem_cgroup *iter,
for_each_mem_cgroup_tree_cond(iter, NULL, true)
-static inline bool mem_cgroup_is_root(struct mem_cgroup *mem)
+static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
{
- return (mem == root_mem_cgroup);
+ return (memcg == root_mem_cgroup);
}
void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx)
{
- struct mem_cgroup *mem;
+ struct mem_cgroup *memcg;
if (!mm)
return;
rcu_read_lock();
- mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
- if (unlikely(!mem))
+ memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
+ if (unlikely(!memcg))
goto out;
switch (idx) {
case PGMAJFAULT:
- mem_cgroup_pgmajfault(mem, 1);
+ mem_cgroup_pgmajfault(memcg, 1);
break;
case PGFAULT:
- mem_cgroup_pgfault(mem, 1);
+ mem_cgroup_pgfault(memcg, 1);
break;
default:
BUG();
@@ -1063,21 +1066,21 @@ void mem_cgroup_move_lists(struct page *page,
}
/*
- * Checks whether given mem is same or in the root_mem's
+ * Checks whether given mem is same or in the root_mem_cgroup's
* hierarchy subtree
*/
-static bool mem_cgroup_same_or_subtree(const struct mem_cgroup *root_mem,
- struct mem_cgroup *mem)
+static bool mem_cgroup_same_or_subtree(const struct mem_cgroup *root_memcg,
+ struct mem_cgroup *memcg)
{
- if (root_mem != mem) {
- return (root_mem->use_hierarchy &&
- css_is_ancestor(&mem->css, &root_mem->css));
+ if (root_memcg != memcg) {
+ return (root_memcg->use_hierarchy &&
+ css_is_ancestor(&memcg->css, &root_memcg->css));
}
return true;
}
-int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem)
+int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *memcg)
{
int ret;
struct mem_cgroup *curr = NULL;
@@ -1091,12 +1094,12 @@ int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem)
if (!curr)
return 0;
/*
- * We should check use_hierarchy of "mem" not "curr". Because checking
+ * We should check use_hierarchy of "memcg" not "curr". Because checking
* use_hierarchy of "curr" here make this function true if hierarchy is
- * enabled in "curr" and "curr" is a child of "mem" in *cgroup*
- * hierarchy(even if use_hierarchy is disabled in "mem").
+ * enabled in "curr" and "curr" is a child of "memcg" in *cgroup*
+ * hierarchy(even if use_hierarchy is disabled in "memcg").
*/
- ret = mem_cgroup_same_or_subtree(mem, curr);
+ ret = mem_cgroup_same_or_subtree(memcg, curr);
css_put(&curr->css);
return ret;
}
@@ -1254,13 +1257,13 @@ unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
* Returns the maximum amount of memory @mem can be charged with, in
* pages.
*/
-static unsigned long mem_cgroup_margin(struct mem_cgroup *mem)
+static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg)
{
unsigned long long margin;
- margin = res_counter_margin(&mem->res);
+ margin = res_counter_margin(&memcg->res);
if (do_swap_account)
- margin = min(margin, res_counter_margin(&mem->memsw));
+ margin = min(margin, res_counter_margin(&memcg->memsw));
return margin >> PAGE_SHIFT;
}
@@ -1275,33 +1278,33 @@ int mem_cgroup_swappiness(struct mem_cgroup *memcg)
return memcg->swappiness;
}
-static void mem_cgroup_start_move(struct mem_cgroup *mem)
+static void mem_cgroup_start_move(struct mem_cgroup *memcg)
{
int cpu;
get_online_cpus();
- spin_lock(&mem->pcp_counter_lock);
+ spin_lock(&memcg->pcp_counter_lock);
for_each_online_cpu(cpu)
- per_cpu(mem->stat->count[MEM_CGROUP_ON_MOVE], cpu) += 1;
- mem->nocpu_base.count[MEM_CGROUP_ON_MOVE] += 1;
- spin_unlock(&mem->pcp_counter_lock);
+ per_cpu(memcg->stat->count[MEM_CGROUP_ON_MOVE], cpu) += 1;
+ memcg->nocpu_base.count[MEM_CGROUP_ON_MOVE] += 1;
+ spin_unlock(&memcg->pcp_counter_lock);
put_online_cpus();
synchronize_rcu();
}
-static void mem_cgroup_end_move(struct mem_cgroup *mem)
+static void mem_cgroup_end_move(struct mem_cgroup *memcg)
{
int cpu;
- if (!mem)
+ if (!memcg)
return;
get_online_cpus();
- spin_lock(&mem->pcp_counter_lock);
+ spin_lock(&memcg->pcp_counter_lock);
for_each_online_cpu(cpu)
- per_cpu(mem->stat->count[MEM_CGROUP_ON_MOVE], cpu) -= 1;
- mem->nocpu_base.count[MEM_CGROUP_ON_MOVE] -= 1;
- spin_unlock(&mem->pcp_counter_lock);
+ per_cpu(memcg->stat->count[MEM_CGROUP_ON_MOVE], cpu) -= 1;
+ memcg->nocpu_base.count[MEM_CGROUP_ON_MOVE] -= 1;
+ spin_unlock(&memcg->pcp_counter_lock);
put_online_cpus();
}
/*
@@ -1316,13 +1319,13 @@ static void mem_cgroup_end_move(struct mem_cgroup *mem)
* waiting at hith-memory prressure caused by "move".
*/
-static bool mem_cgroup_stealed(struct mem_cgroup *mem)
+static bool mem_cgroup_stealed(struct mem_cgroup *memcg)
{
VM_BUG_ON(!rcu_read_lock_held());
- return this_cpu_read(mem->stat->count[MEM_CGROUP_ON_MOVE]) > 0;
+ return this_cpu_read(memcg->stat->count[MEM_CGROUP_ON_MOVE]) > 0;
}
-static bool mem_cgroup_under_move(struct mem_cgroup *mem)
+static bool mem_cgroup_under_move(struct mem_cgroup *memcg)
{
struct mem_cgroup *from;
struct mem_cgroup *to;
@@ -1337,17 +1340,17 @@ static bool mem_cgroup_under_move(struct mem_cgroup *mem)
if (!from)
goto unlock;
- ret = mem_cgroup_same_or_subtree(mem, from)
- || mem_cgroup_same_or_subtree(mem, to);
+ ret = mem_cgroup_same_or_subtree(memcg, from)
+ || mem_cgroup_same_or_subtree(memcg, to);
unlock:
spin_unlock(&mc.lock);
return ret;
}
-static bool mem_cgroup_wait_acct_move(struct mem_cgroup *mem)
+static bool mem_cgroup_wait_acct_move(struct mem_cgroup *memcg)
{
if (mc.moving_task && current != mc.moving_task) {
- if (mem_cgroup_under_move(mem)) {
+ if (mem_cgroup_under_move(memcg)) {
DEFINE_WAIT(wait);
prepare_to_wait(&mc.waitq, &wait, TASK_INTERRUPTIBLE);
/* moving charge context might have finished. */
@@ -1431,12 +1434,12 @@ done:
* This function returns the number of memcg under hierarchy tree. Returns
* 1(self count) if no children.
*/
-static int mem_cgroup_count_children(struct mem_cgroup *mem)
+static int mem_cgroup_count_children(struct mem_cgroup *memcg)
{
int num = 0;
struct mem_cgroup *iter;
- for_each_mem_cgroup_tree(iter, mem)
+ for_each_mem_cgroup_tree(iter, memcg)
num++;
return num;
}
@@ -1466,21 +1469,21 @@ u64 mem_cgroup_get_limit(struct mem_cgroup *memcg)
* that to reclaim free pages from.
*/
static struct mem_cgroup *
-mem_cgroup_select_victim(struct mem_cgroup *root_mem)
+mem_cgroup_select_victim(struct mem_cgroup *root_memcg)
{
struct mem_cgroup *ret = NULL;
struct cgroup_subsys_state *css;
int nextid, found;
- if (!root_mem->use_hierarchy) {
- css_get(&root_mem->css);
- ret = root_mem;
+ if (!root_memcg->use_hierarchy) {
+ css_get(&root_memcg->css);
+ ret = root_memcg;
}
while (!ret) {
rcu_read_lock();
- nextid = root_mem->last_scanned_child + 1;
- css = css_get_next(&mem_cgroup_subsys, nextid, &root_mem->css,
+ nextid = root_memcg->last_scanned_child + 1;
+ css = css_get_next(&mem_cgroup_subsys, nextid, &root_memcg->css,
&found);
if (css && css_tryget(css))
ret = container_of(css, struct mem_cgroup, css);
@@ -1489,9 +1492,9 @@ mem_cgroup_select_victim(struct mem_cgroup *root_mem)
/* Updates scanning parameter */
if (!css) {
/* this means start scan from ID:1 */
- root_mem->last_scanned_child = 0;
+ root_memcg->last_scanned_child = 0;
} else
- root_mem->last_scanned_child = found;
+ root_memcg->last_scanned_child = found;
}
return ret;
@@ -1507,14 +1510,14 @@ mem_cgroup_select_victim(struct mem_cgroup *root_mem)
* reclaimable pages on a node. Returns true if there are any reclaimable
* pages in the node.
*/
-static bool test_mem_cgroup_node_reclaimable(struct mem_cgroup *mem,
+static bool test_mem_cgroup_node_reclaimable(struct mem_cgroup *memcg,
int nid, bool noswap)
{
- if (mem_cgroup_node_nr_lru_pages(mem, nid, LRU_ALL_FILE))
+ if (mem_cgroup_node_nr_lru_pages(memcg, nid, LRU_ALL_FILE))
return true;
if (noswap || !total_swap_pages)
return false;
- if (mem_cgroup_node_nr_lru_pages(mem, nid, LRU_ALL_ANON))
+ if (mem_cgroup_node_nr_lru_pages(memcg, nid, LRU_ALL_ANON))
return true;
return false;
@@ -1527,29 +1530,29 @@ static bool test_mem_cgroup_node_reclaimable(struct mem_cgroup *mem,
* nodes based on the zonelist. So update the list loosely once per 10 secs.
*
*/
-static void mem_cgroup_may_update_nodemask(struct mem_cgroup *mem)
+static void mem_cgroup_may_update_nodemask(struct mem_cgroup *memcg)
{
int nid;
/*
* numainfo_events > 0 means there was at least NUMAINFO_EVENTS_TARGET
* pagein/pageout changes since the last update.
*/
- if (!atomic_read(&mem->numainfo_events))
+ if (!atomic_read(&memcg->numainfo_events))
return;
- if (atomic_inc_return(&mem->numainfo_updating) > 1)
+ if (atomic_inc_return(&memcg->numainfo_updating) > 1)
return;
/* make a nodemask where this memcg uses memory from */
- mem->scan_nodes = node_states[N_HIGH_MEMORY];
+ memcg->scan_nodes = node_states[N_HIGH_MEMORY];
for_each_node_mask(nid, node_states[N_HIGH_MEMORY]) {
- if (!test_mem_cgroup_node_reclaimable(mem, nid, false))
- node_clear(nid, mem->scan_nodes);
+ if (!test_mem_cgroup_node_reclaimable(memcg, nid, false))
+ node_clear(nid, memcg->scan_nodes);
}
- atomic_set(&mem->numainfo_events, 0);
- atomic_set(&mem->numainfo_updating, 0);
+ atomic_set(&memcg->numainfo_events, 0);
+ atomic_set(&memcg->numainfo_updating, 0);
}
/*
@@ -1564,16 +1567,16 @@ static void mem_cgroup_may_update_nodemask(struct mem_cgroup *mem)
*
* Now, we use round-robin. Better algorithm is welcomed.
*/
-int mem_cgroup_select_victim_node(struct mem_cgroup *mem)
+int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
{
int node;
- mem_cgroup_may_update_nodemask(mem);
- node = mem->last_scanned_node;
+ mem_cgroup_may_update_nodemask(memcg);
+ node = memcg->last_scanned_node;
- node = next_node(node, mem->scan_nodes);
+ node = next_node(node, memcg->scan_nodes);
if (node == MAX_NUMNODES)
- node = first_node(mem->scan_nodes);
+ node = first_node(memcg->scan_nodes);
/*
* We call this when we hit limit, not when pages are added to LRU.
* No LRU may hold pages because all pages are UNEVICTABLE or
@@ -1583,7 +1586,7 @@ int mem_cgroup_select_victim_node(struct mem_cgroup *mem)
if (unlikely(node == MAX_NUMNODES))
node = numa_node_id();
- mem->last_scanned_node = node;
+ memcg->last_scanned_node = node;
return node;
}
@@ -1593,7 +1596,7 @@ int mem_cgroup_select_victim_node(struct mem_cgroup *mem)
* unused nodes. But scan_nodes is lazily updated and may not cotain
* enough new information. We need to do double check.
*/
-bool mem_cgroup_reclaimable(struct mem_cgroup *mem, bool noswap)
+bool mem_cgroup_reclaimable(struct mem_cgroup *memcg, bool noswap)
{
int nid;
@@ -1601,12 +1604,12 @@ bool mem_cgroup_reclaimable(struct mem_cgroup *mem, bool noswap)
* quick check...making use of scan_node.
* We can skip unused nodes.
*/
- if (!nodes_empty(mem->scan_nodes)) {
- for (nid = first_node(mem->scan_nodes);
+ if (!nodes_empty(memcg->scan_nodes)) {
+ for (nid = first_node(memcg->scan_nodes);
nid < MAX_NUMNODES;
- nid = next_node(nid, mem->scan_nodes)) {
+ nid = next_node(nid, memcg->scan_nodes)) {
- if (test_mem_cgroup_node_reclaimable(mem, nid, noswap))
+ if (test_mem_cgroup_node_reclaimable(memcg, nid, noswap))
return true;
}
}
@@ -1614,23 +1617,23 @@ bool mem_cgroup_reclaimable(struct mem_cgroup *mem, bool noswap)
* Check rest of nodes.
*/
for_each_node_state(nid, N_HIGH_MEMORY) {
- if (node_isset(nid, mem->scan_nodes))
+ if (node_isset(nid, memcg->scan_nodes))
continue;
- if (test_mem_cgroup_node_reclaimable(mem, nid, noswap))
+ if (test_mem_cgroup_node_reclaimable(memcg, nid, noswap))
return true;
}
return false;
}
#else
-int mem_cgroup_select_victim_node(struct mem_cgroup *mem)
+int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
{
return 0;
}
-bool mem_cgroup_reclaimable(struct mem_cgroup *mem, bool noswap)
+bool mem_cgroup_reclaimable(struct mem_cgroup *memcg, bool noswap)
{
- return test_mem_cgroup_node_reclaimable(mem, 0, noswap);
+ return test_mem_cgroup_node_reclaimable(memcg, 0, noswap);
}
#endif
@@ -1639,14 +1642,14 @@ bool mem_cgroup_reclaimable(struct mem_cgroup *mem, bool noswap)
* we reclaimed from, so that we don't end up penalizing one child extensively
* based on its position in the children list.
*
- * root_mem is the original ancestor that we've been reclaim from.
+ * root_memcg is the original ancestor that we've been reclaim from.
*
- * We give up and return to the caller when we visit root_mem twice.
+ * We give up and return to the caller when we visit root_memcg twice.
* (other groups can be removed while we're walking....)
*
* If shrink==true, for avoiding to free too much, this returns immedieately.
*/
-static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem,
+static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_memcg,
struct zone *zone,
gfp_t gfp_mask,
unsigned long reclaim_options,
@@ -1661,15 +1664,15 @@ static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem,
unsigned long excess;
unsigned long nr_scanned;
- excess = res_counter_soft_limit_excess(&root_mem->res) >> PAGE_SHIFT;
+ excess = res_counter_soft_limit_excess(&root_memcg->res) >> PAGE_SHIFT;
/* If memsw_is_minimum==1, swap-out is of-no-use. */
- if (!check_soft && !shrink && root_mem->memsw_is_minimum)
+ if (!check_soft && !shrink && root_memcg->memsw_is_minimum)
noswap = true;
while (1) {
- victim = mem_cgroup_select_victim(root_mem);
- if (victim == root_mem) {
+ victim = mem_cgroup_select_victim(root_memcg);
+ if (victim == root_memcg) {
loop++;
/*
* We are not draining per cpu cached charges during
@@ -1678,7 +1681,7 @@ static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem,
* charges will not give any.
*/
if (!check_soft && loop >= 1)
- drain_all_stock_async(root_mem);
+ drain_all_stock_async(root_memcg);
if (loop >= 2) {
/*
* If we have not been able to reclaim
@@ -1725,9 +1728,9 @@ static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem,
return ret;
total += ret;
if (check_soft) {
- if (!res_counter_soft_limit_excess(&root_mem->res))
+ if (!res_counter_soft_limit_excess(&root_memcg->res))
return total;
- } else if (mem_cgroup_margin(root_mem))
+ } else if (mem_cgroup_margin(root_memcg))
return total;
}
return total;
@@ -1738,12 +1741,12 @@ static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem,
* If someone is running, return false.
* Has to be called with memcg_oom_lock
*/
-static bool mem_cgroup_oom_lock(struct mem_cgroup *mem)
+static bool mem_cgroup_oom_lock(struct mem_cgroup *memcg)
{
struct mem_cgroup *iter, *failed = NULL;
bool cond = true;
- for_each_mem_cgroup_tree_cond(iter, mem, cond) {
+ for_each_mem_cgroup_tree_cond(iter, memcg, cond) {
if (iter->oom_lock) {
/*
* this subtree of our hierarchy is already locked
@@ -1763,7 +1766,7 @@ static bool mem_cgroup_oom_lock(struct mem_cgroup *mem)
* what we set up to the failing subtree
*/
cond = true;
- for_each_mem_cgroup_tree_cond(iter, mem, cond) {
+ for_each_mem_cgroup_tree_cond(iter, memcg, cond) {
if (iter == failed) {
cond = false;
continue;
@@ -1776,24 +1779,24 @@ static bool mem_cgroup_oom_lock(struct mem_cgroup *mem)
/*
* Has to be called with memcg_oom_lock
*/
-static int mem_cgroup_oom_unlock(struct mem_cgroup *mem)
+static int mem_cgroup_oom_unlock(struct mem_cgroup *memcg)
{
struct mem_cgroup *iter;
- for_each_mem_cgroup_tree(iter, mem)
+ for_each_mem_cgroup_tree(iter, memcg)
iter->oom_lock = false;
return 0;
}
-static void mem_cgroup_mark_under_oom(struct mem_cgroup *mem)
+static void mem_cgroup_mark_under_oom(struct mem_cgroup *memcg)
{
struct mem_cgroup *iter;
- for_each_mem_cgroup_tree(iter, mem)
+ for_each_mem_cgroup_tree(iter, memcg)
atomic_inc(&iter->under_oom);
}
-static void mem_cgroup_unmark_under_oom(struct mem_cgroup *mem)
+static void mem_cgroup_unmark_under_oom(struct mem_cgroup *memcg)
{
struct mem_cgroup *iter;
@@ -1802,7 +1805,7 @@ static void mem_cgroup_unmark_under_oom(struct mem_cgroup *mem)
* mem_cgroup_oom_lock() may not be called. We have to use
* atomic_add_unless() here.
*/
- for_each_mem_cgroup_tree(iter, mem)
+ for_each_mem_cgroup_tree(iter, memcg)
atomic_add_unless(&iter->under_oom, -1, 0);
}
@@ -1817,80 +1820,80 @@ struct oom_wait_info {
static int memcg_oom_wake_function(wait_queue_t *wait,
unsigned mode, int sync, void *arg)
{
- struct mem_cgroup *wake_mem = (struct mem_cgroup *)arg,
- *oom_wait_mem;
+ struct mem_cgroup *wake_memcg = (struct mem_cgroup *)arg,
+ *oom_wait_memcg;
struct oom_wait_info *oom_wait_info;
oom_wait_info = container_of(wait, struct oom_wait_info, wait);
- oom_wait_mem = oom_wait_info->mem;
+ oom_wait_memcg = oom_wait_info->mem;
/*
* Both of oom_wait_info->mem and wake_mem are stable under us.
* Then we can use css_is_ancestor without taking care of RCU.
*/
- if (!mem_cgroup_same_or_subtree(oom_wait_mem, wake_mem)
- && !mem_cgroup_same_or_subtree(wake_mem, oom_wait_mem))
+ if (!mem_cgroup_same_or_subtree(oom_wait_memcg, wake_memcg)
+ && !mem_cgroup_same_or_subtree(wake_memcg, oom_wait_memcg))
return 0;
return autoremove_wake_function(wait, mode, sync, arg);
}
-static void memcg_wakeup_oom(struct mem_cgroup *mem)
+static void memcg_wakeup_oom(struct mem_cgroup *memcg)
{
- /* for filtering, pass "mem" as argument. */
- __wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, mem);
+ /* for filtering, pass "memcg" as argument. */
+ __wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, memcg);
}
-static void memcg_oom_recover(struct mem_cgroup *mem)
+static void memcg_oom_recover(struct mem_cgroup *memcg)
{
- if (mem && atomic_read(&mem->under_oom))
- memcg_wakeup_oom(mem);
+ if (memcg && atomic_read(&memcg->under_oom))
+ memcg_wakeup_oom(memcg);
}
/*
* try to call OOM killer. returns false if we should exit memory-reclaim loop.
*/
-bool mem_cgroup_handle_oom(struct mem_cgroup *mem, gfp_t mask)
+bool mem_cgroup_handle_oom(struct mem_cgroup *memcg, gfp_t mask)
{
struct oom_wait_info owait;
bool locked, need_to_kill;
- owait.mem = mem;
+ owait.mem = memcg;
owait.wait.flags = 0;
owait.wait.func = memcg_oom_wake_function;
owait.wait.private = current;
INIT_LIST_HEAD(&owait.wait.task_list);
need_to_kill = true;
- mem_cgroup_mark_under_oom(mem);
+ mem_cgroup_mark_under_oom(memcg);
- /* At first, try to OOM lock hierarchy under mem.*/
+ /* At first, try to OOM lock hierarchy under memcg.*/
spin_lock(&memcg_oom_lock);
- locked = mem_cgroup_oom_lock(mem);
+ locked = mem_cgroup_oom_lock(memcg);
/*
* Even if signal_pending(), we can't quit charge() loop without
* accounting. So, UNINTERRUPTIBLE is appropriate. But SIGKILL
* under OOM is always welcomed, use TASK_KILLABLE here.
*/
prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE);
- if (!locked || mem->oom_kill_disable)
+ if (!locked || memcg->oom_kill_disable)
need_to_kill = false;
if (locked)
- mem_cgroup_oom_notify(mem);
+ mem_cgroup_oom_notify(memcg);
spin_unlock(&memcg_oom_lock);
if (need_to_kill) {
finish_wait(&memcg_oom_waitq, &owait.wait);
- mem_cgroup_out_of_memory(mem, mask);
+ mem_cgroup_out_of_memory(memcg, mask);
} else {
schedule();
finish_wait(&memcg_oom_waitq, &owait.wait);
}
spin_lock(&memcg_oom_lock);
if (locked)
- mem_cgroup_oom_unlock(mem);
- memcg_wakeup_oom(mem);
+ mem_cgroup_oom_unlock(memcg);
+ memcg_wakeup_oom(memcg);
spin_unlock(&memcg_oom_lock);
- mem_cgroup_unmark_under_oom(mem);
+ mem_cgroup_unmark_under_oom(memcg);
if (test_thread_flag(TIF_MEMDIE) || fatal_signal_pending(current))
return false;
@@ -1926,7 +1929,7 @@ bool mem_cgroup_handle_oom(struct mem_cgroup *mem, gfp_t mask)
void mem_cgroup_update_page_stat(struct page *page,
enum mem_cgroup_page_stat_item idx, int val)
{
- struct mem_cgroup *mem;
+ struct mem_cgroup *memcg;
struct page_cgroup *pc = lookup_page_cgroup(page);
bool need_unlock = false;
unsigned long uninitialized_var(flags);
@@ -1935,16 +1938,16 @@ void mem_cgroup_update_page_stat(struct page *page,
return;
rcu_read_lock();
- mem = pc->mem_cgroup;
- if (unlikely(!mem || !PageCgroupUsed(pc)))