From 6a18adb35c27848195c938b0779ce882d63d3ed1 Mon Sep 17 00:00:00 2001
From: Konstantin Khlebnikov
Date: Tue, 29 May 2012 15:06:59 -0700
Subject: mm/vmscan: push zone pointer into shrink_page_list()

It doesn't need a pointer to the cgroup - pointer to the zone is enough.

This patch also kills the "mz" argument of page_check_references() - it is
unused after "mm: memcg: count pte references from every member of the
reclaimed hierarchy".

Signed-off-by: Konstantin Khlebnikov
Cc: Mel Gorman
Cc: KAMEZAWA Hiroyuki
Acked-by: Hugh Dickins
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/vmscan.c | 11 +++++------
 1 file changed, 5 insertions(+), 6 deletions(-)

(limited to 'mm')

diff --git a/mm/vmscan.c b/mm/vmscan.c
index b7d03d7b8f8..eaa154bd1f8 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -629,7 +629,6 @@ enum page_references {
 };
 
 static enum page_references page_check_references(struct page *page,
-						  struct mem_cgroup_zone *mz,
 						  struct scan_control *sc)
 {
 	int referenced_ptes, referenced_page;
@@ -688,7 +687,7 @@ static enum page_references page_check_references(struct page *page,
  * shrink_page_list() returns the number of reclaimed pages
  */
 static unsigned long shrink_page_list(struct list_head *page_list,
-				      struct mem_cgroup_zone *mz,
+				      struct zone *zone,
 				      struct scan_control *sc,
 				      unsigned long *ret_nr_dirty,
 				      unsigned long *ret_nr_writeback)
@@ -718,7 +717,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 			goto keep;
 
 		VM_BUG_ON(PageActive(page));
-		VM_BUG_ON(page_zone(page) != mz->zone);
+		VM_BUG_ON(page_zone(page) != zone);
 
 		sc->nr_scanned++;
 
@@ -741,7 +740,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 			goto keep;
 		}
 
-		references = page_check_references(page, mz, sc);
+		references = page_check_references(page, sc);
 		switch (references) {
 		case PAGEREF_ACTIVATE:
 			goto activate_locked;
@@ -931,7 +930,7 @@ keep:
 	 * will encounter the same problem
 	 */
 	if (nr_dirty && nr_dirty == nr_congested && global_reclaim(sc))
-		zone_set_flag(mz->zone, ZONE_CONGESTED);
+		zone_set_flag(zone, ZONE_CONGESTED);
 
 	free_hot_cold_page_list(&free_pages, 1);
 
@@ -1309,7 +1308,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct mem_cgroup_zone *mz,
 
 	update_isolated_counts(mz, &page_list, &nr_anon, &nr_file);
 
-	nr_reclaimed = shrink_page_list(&page_list, mz, sc,
+	nr_reclaimed = shrink_page_list(&page_list, zone, sc,
 						&nr_dirty, &nr_writeback);
 
 	spin_lock_irq(&zone->lru_lock);
-- 
cgit v1.2.3-18-g5258