From 1eb4927251a4e5ab152e64afb29453547365fde8 Mon Sep 17 00:00:00 2001
From: Hugh Dickins
Date: Wed, 21 Mar 2012 16:34:19 -0700
Subject: memcg: lru_size instead of MEM_CGROUP_ZSTAT

I never understood why we need a MEM_CGROUP_ZSTAT(mz, idx) macro to
obscure the LRU counts.  For easier searching?  So call it lru_size
rather than bare count (lru_length sounds better, but would be wrong,
since each huge page raises lru_size hugely).

Signed-off-by: Hugh Dickins
Acked-by: Kirill A. Shutemov
Acked-by: KAMEZAWA Hiroyuki
Cc: Michal Hocko
Cc: KOSAKI Motohiro
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/memcontrol.c | 14 ++++++--------
 1 file changed, 6 insertions(+), 8 deletions(-)

diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index e5370db7ad7..6405e78e26e 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -135,7 +135,7 @@ struct mem_cgroup_reclaim_iter {
  */
 struct mem_cgroup_per_zone {
 	struct lruvec		lruvec;
-	unsigned long		count[NR_LRU_LISTS];
+	unsigned long		lru_size[NR_LRU_LISTS];
 
 	struct mem_cgroup_reclaim_iter reclaim_iter[DEF_PRIORITY + 1];
 
@@ -147,8 +147,6 @@ struct mem_cgroup_per_zone {
 	struct mem_cgroup	*memcg;		/* Back pointer, we cannot */
 						/* use container_of	   */
 };
-/* Macro for accessing counter */
-#define MEM_CGROUP_ZSTAT(mz, idx)	((mz)->count[(idx)])
 
 struct mem_cgroup_per_node {
 	struct mem_cgroup_per_zone zoneinfo[MAX_NR_ZONES];
@@ -728,7 +726,7 @@ mem_cgroup_zone_nr_lru_pages(struct mem_cgroup *memcg, int nid, int zid,
 
 	for_each_lru(l) {
 		if (BIT(l) & lru_mask)
-			ret += MEM_CGROUP_ZSTAT(mz, l);
+			ret += mz->lru_size[l];
 	}
 	return ret;
 }
@@ -1077,7 +1075,7 @@ struct lruvec *mem_cgroup_lru_add_list(struct zone *zone, struct page *page,
 
 	mz = page_cgroup_zoneinfo(memcg, page);
 	/* compound_order() is stabilized through lru_lock */
-	MEM_CGROUP_ZSTAT(mz, lru) += 1 << compound_order(page);
+	mz->lru_size[lru] += 1 << compound_order(page);
 	return &mz->lruvec;
 }
 
@@ -1105,8 +1103,8 @@ void mem_cgroup_lru_del_list(struct page *page, enum lru_list lru)
 	VM_BUG_ON(!memcg);
 	mz = page_cgroup_zoneinfo(memcg, page);
 	/* huge page split is done under lru_lock. so, we have no races. */
-	VM_BUG_ON(MEM_CGROUP_ZSTAT(mz, lru) < (1 << compound_order(page)));
-	MEM_CGROUP_ZSTAT(mz, lru) -= 1 << compound_order(page);
+	VM_BUG_ON(mz->lru_size[lru] < (1 << compound_order(page)));
+	mz->lru_size[lru] -= 1 << compound_order(page);
 }
 
 void mem_cgroup_lru_del(struct page *page)
@@ -3629,7 +3627,7 @@ static int mem_cgroup_force_empty_list(struct mem_cgroup *memcg,
 	mz = mem_cgroup_zoneinfo(memcg, node, zid);
 	list = &mz->lruvec.lists[lru];
 
-	loop = MEM_CGROUP_ZSTAT(mz, lru);
+	loop = mz->lru_size[lru];
 	/* give some margin against EBUSY etc...*/
 	loop += 256;
 	busy = NULL;
--
cgit v1.2.3-18-g5258
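
A quick standalone sketch (not part of the patch, and not kernel code; the
orders below stand in for what compound_order(page) would return) of the
naming point in the commit message: lru_size records a size in base pages,
not a length in list entries, because one compound page adds 2^order pages.

#include <stdio.h>

#define NR_LRU_LISTS 5

int main(void)
{
	unsigned long lru_size[NR_LRU_LISTS] = { 0 };
	unsigned int order0 = 0;	/* a normal 4KiB page          */
	unsigned int order9 = 9;	/* a 2MiB transparent hugepage */

	/*
	 * Mirrors the accounting in mem_cgroup_lru_add_list(): each page
	 * added to an LRU list contributes 1 << compound_order(page)
	 * base pages, not one list entry.
	 */
	lru_size[0] += 1UL << order0;	/* +1   */
	lru_size[0] += 1UL << order9;	/* +512 */

	printf("lru_size[0] = %lu\n", lru_size[0]);	/* prints 513 */
	return 0;
}

So two list entries here yield lru_size of 513, which is why lru_length
would have been the wrong name.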