-rw-r--r--  mm/vmscan.c | 47 ++++++++++++++++++++++++++++++-----------------
1 file changed, 30 insertions(+), 17 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 8dfa59866af..d7dad2a4e69 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1138,7 +1138,7 @@ int __isolate_lru_page(struct page *page, isolate_mode_t mode, int file)
  * @mz:		The mem_cgroup_zone to pull pages from.
  * @dst:	The temp list to put pages on to.
  * @nr_scanned:	The number of pages that were scanned.
- * @order:	The caller's attempted allocation order
+ * @sc:		The scan_control struct for this reclaim session
  * @mode:	One of the LRU isolation modes
  * @active:	True [1] if isolating active pages
  * @file:	True [1] if isolating file [!anon] pages
@@ -1147,8 +1147,8 @@ int __isolate_lru_page(struct page *page, isolate_mode_t mode, int file)
  */
 static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
 		struct mem_cgroup_zone *mz, struct list_head *dst,
-		unsigned long *nr_scanned, int order, isolate_mode_t mode,
-		int active, int file)
+		unsigned long *nr_scanned, struct scan_control *sc,
+		isolate_mode_t mode, int active, int file)
 {
 	struct lruvec *lruvec;
 	struct list_head *src;
@@ -1194,7 +1194,7 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
 			BUG();
 		}
 
-		if (!order)
+		if (!sc->order || !(sc->reclaim_mode & RECLAIM_MODE_LUMPYRECLAIM))
 			continue;
 
 		/*
@@ -1208,8 +1208,8 @@
 		 */
 		zone_id = page_zone_id(page);
 		page_pfn = page_to_pfn(page);
-		pfn = page_pfn & ~((1 << order) - 1);
-		end_pfn = pfn + (1 << order);
+		pfn = page_pfn & ~((1 << sc->order) - 1);
+		end_pfn = pfn + (1 << sc->order);
 		for (; pfn < end_pfn; pfn++) {
 			struct page *cursor_page;
 
@@ -1275,7 +1275,7 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
 
 	*nr_scanned = scan;
 
-	trace_mm_vmscan_lru_isolate(order,
+	trace_mm_vmscan_lru_isolate(sc->order,
 			nr_to_scan, scan,
 			nr_taken,
 			nr_lumpy_taken, nr_lumpy_dirty, nr_lumpy_failed,
@@ -1533,9 +1533,8 @@ shrink_inactive_list(unsigned long nr_to_scan, struct mem_cgroup_zone *mz,
 
 	spin_lock_irq(&zone->lru_lock);
 
-	nr_taken = isolate_lru_pages(nr_to_scan, mz, &page_list,
-				     &nr_scanned, sc->order,
-				     isolate_mode, 0, file);
+	nr_taken = isolate_lru_pages(nr_to_scan, mz, &page_list, &nr_scanned,
+				     sc, isolate_mode, 0, file);
 	if (global_reclaim(sc)) {
 		zone->pages_scanned += nr_scanned;
 		if (current_is_kswapd())
@@ -1711,8 +1710,7 @@ static void shrink_active_list(unsigned long nr_to_scan,
 
 	spin_lock_irq(&zone->lru_lock);
 
-	nr_taken = isolate_lru_pages(nr_to_scan, mz, &l_hold,
-				     &nr_scanned, sc->order,
+	nr_taken = isolate_lru_pages(nr_to_scan, mz, &l_hold, &nr_scanned, sc,
 				     isolate_mode, 1, file);
 	if (global_reclaim(sc))
 		zone->pages_scanned += nr_scanned;
@@ -2758,7 +2756,7 @@ loop_again:
 		 */
 		for (i = 0; i <= end_zone; i++) {
 			struct zone *zone = pgdat->node_zones + i;
-			int nr_slab;
+			int nr_slab, testorder;
 			unsigned long balance_gap;
 
 			if (!populated_zone(zone))
@@ -2791,7 +2789,20 @@ loop_again:
 				(zone->present_pages +
 					KSWAPD_ZONE_BALANCE_GAP_RATIO-1) /
 				KSWAPD_ZONE_BALANCE_GAP_RATIO);
-			if (!zone_watermark_ok_safe(zone, order,
+			/*
+			 * Kswapd reclaims only single pages with compaction
+			 * enabled. Trying too hard to reclaim until contiguous
+			 * free pages have become available can hurt performance
+			 * by evicting too much useful data from memory.
+			 * Do not reclaim more than needed for compaction.
+			 */
+			testorder = order;
+			if (COMPACTION_BUILD && order &&
+					compaction_suitable(zone, order) !=
+						COMPACT_SKIPPED)
+				testorder = 0;
+
+			if (!zone_watermark_ok_safe(zone, testorder,
 					high_wmark_pages(zone) + balance_gap,
 					end_zone, 0)) {
 				shrink_zone(priority, zone, &sc);
@@ -2820,7 +2831,7 @@ loop_again:
 				continue;
 			}
 
-			if (!zone_watermark_ok_safe(zone, order,
+			if (!zone_watermark_ok_safe(zone, testorder,
 					high_wmark_pages(zone), end_zone, 0)) {
 				all_zones_ok = 0;
 				/*
@@ -2917,6 +2928,10 @@ out:
 			if (zone->all_unreclaimable && priority != DEF_PRIORITY)
 				continue;
 
+			/* Would compaction fail due to lack of free memory? */
+			if (compaction_suitable(zone, order) == COMPACT_SKIPPED)
+				goto loop_again;
+
 			/* Confirm the zone is balanced for order-0 */
 			if (!zone_watermark_ok(zone, 0,
 					high_wmark_pages(zone), 0, 0)) {
@@ -2926,8 +2941,6 @@ out:
 
 			/* If balanced, clear the congested flag */
 			zone_clear_flag(zone, ZONE_CONGESTED);
-			if (i <= *classzone_idx)
-				balanced += zone->present_pages;
 		}
 	}
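For context, the balance_pgdat() hunk above makes kswapd check a "testorder" of 0 whenever compaction is built in and could run for the requested order, so that kswapd stops reclaiming once enough order-0 pages exist and lets compaction assemble the high-order block instead of evicting more useful data. The user-space sketch below only illustrates that decision; it is not kernel code, and compaction_suitable_sketch(), watermark_ok_sketch(), and the numbers in main() are simplified assumptions rather than the real compaction_suitable()/zone_watermark_ok_safe() logic.

/* Minimal user-space sketch of the testorder decision; all helpers and
 * thresholds below are illustrative assumptions, not kernel behaviour. */
#include <stdbool.h>
#include <stdio.h>

#define COMPACTION_BUILD 1	/* assume CONFIG_COMPACTION=y */

enum compact_result { COMPACT_SKIPPED, COMPACT_CONTINUE };

/* Stand-in for compaction_suitable(): pretend compaction has enough
 * order-0 free pages to work with once free_pages >= migrate_slack. */
static enum compact_result compaction_suitable_sketch(unsigned long free_pages,
						      unsigned long migrate_slack)
{
	return free_pages >= migrate_slack ? COMPACT_CONTINUE : COMPACT_SKIPPED;
}

/* Stand-in for zone_watermark_ok_safe(): crude model that demands one
 * extra contiguous block per order step on top of the high watermark. */
static bool watermark_ok_sketch(unsigned long free_pages,
				unsigned long high_wmark, int order)
{
	return free_pages >= high_wmark + (1UL << order);
}

int main(void)
{
	unsigned long free_pages = 2048, high_wmark = 1024, migrate_slack = 512;
	int order = 4;		/* the allocation order kswapd woke up for */
	int testorder = order;

	/* The patch's logic: if compaction can run, only require the zone
	 * to be balanced at order 0 and leave the rest to compaction. */
	if (COMPACTION_BUILD && order &&
	    compaction_suitable_sketch(free_pages, migrate_slack) != COMPACT_SKIPPED)
		testorder = 0;

	if (!watermark_ok_sketch(free_pages, high_wmark, testorder))
		printf("order %d: keep reclaiming (testorder %d)\n",
		       order, testorder);
	else
		printf("order %d: balanced, defer to compaction (testorder %d)\n",
		       order, testorder);
	return 0;
}

With the example numbers above the zone fails the order-4 check but passes at order 0, so kswapd would stop reclaiming and rely on compaction, which is exactly the trade-off the new comment in the patch describes.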