Diffstat (limited to 'mm/vmscan.c')
-rw-r--r--  mm/vmscan.c  15
1 file changed, 11 insertions(+), 4 deletions(-)
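
The patch below tracks the outcome of the balance check in a local pgdat_is_balanced flag, setting it at the two points where balance_pgdat() already knows the node is balanced, so the out: exit path can test the flag instead of calling pgdat_balanced() a second time. As a rough illustration of that flag-caching pattern, here is a minimal userspace C sketch; expensive_balance_check(), reclaim_pass(), node_is_balanced and the progress counter are hypothetical stand-ins for this sketch, not kernel interfaces.

#include <stdbool.h>
#include <stdio.h>

/*
 * Stand-in for pgdat_balanced(): an expensive predicate we only want to
 * evaluate inside the loop, never again on the exit path.
 */
static bool expensive_balance_check(int progress)
{
	printf("running expensive check (progress=%d)\n", progress);
	return progress >= 3;
}

/* Stand-in for one reclaim pass: pretend each pass makes some progress. */
static int reclaim_pass(int progress)
{
	return progress + 1;
}

int main(void)
{
	bool node_is_balanced = false;	/* plays the role of pgdat_is_balanced */
	int progress = 0;
	int priority;

	/* Arbitrary retry budget, loosely mirroring the priority loop. */
	for (priority = 12; priority >= 0; priority--) {
		progress = reclaim_pass(progress);
		if (expensive_balance_check(progress)) {
			node_is_balanced = true;
			break;	/* all done, remember why we stopped */
		}
	}

	/* Exit path: consult the cached flag instead of re-running the check. */
	if (!node_is_balanced)
		printf("still unbalanced, would sleep and retry\n");
	else
		printf("balanced, no second check needed\n");

	return 0;
}

The point of the pattern is simply that the loop already evaluates the predicate on every pass, so the exit path can reuse the last answer rather than recompute it; the goto and the break in the patch are exactly the places where that answer is known to be true.
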
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 463990941a7..4093b99044f 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2616,6 +2616,7 @@ static bool prepare_kswapd_sleep(pg_data_t *pgdat, int order, long remaining,
static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
int *classzone_idx)
{
+ bool pgdat_is_balanced = false;
struct zone *unbalanced_zone;
int i;
int end_zone = 0; /* Inclusive. 0 = ZONE_DMA */
@@ -2690,8 +2691,11 @@ loop_again:
zone_clear_flag(zone, ZONE_CONGESTED);
}
}
- if (i < 0)
+
+ if (i < 0) {
+ pgdat_is_balanced = true;
goto out;
+ }

for (i = 0; i <= end_zone; i++) {
struct zone *zone = pgdat->node_zones + i;
@@ -2818,8 +2822,11 @@ loop_again:
pfmemalloc_watermark_ok(pgdat))
wake_up(&pgdat->pfmemalloc_wait);

- if (pgdat_balanced(pgdat, order, *classzone_idx))
+ if (pgdat_balanced(pgdat, order, *classzone_idx)) {
+ pgdat_is_balanced = true;
break; /* kswapd: all done */
+ }
+
/*
* OK, kswapd is getting into trouble. Take a nap, then take
* another pass across the zones.
@@ -2840,9 +2847,9 @@ loop_again:
if (sc.nr_reclaimed >= SWAP_CLUSTER_MAX)
break;
} while (--sc.priority >= 0);
-out:

- if (!pgdat_balanced(pgdat, order, *classzone_idx)) {
+out:
+ if (!pgdat_is_balanced) {
cond_resched();
try_to_freeze();