-rw-r--r--	mm/vmscan.c	| 18 ++++++++++++------
1 file changed, 12 insertions(+), 6 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index d036e59d302..6072d74a16f 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1748,6 +1748,7 @@ static void get_scan_count(struct zone *zone, struct scan_control *sc,
 	enum lru_list l;
 	int noswap = 0;
 	int force_scan = 0;
+	unsigned long nr_force_scan[2];
 	anon = zone_nr_lru_pages(zone, sc, LRU_ACTIVE_ANON) +
@@ -1770,6 +1771,8 @@ static void get_scan_count(struct zone *zone, struct scan_control *sc,
 		fraction[0] = 0;
 		fraction[1] = 1;
 		denominator = 1;
+		nr_force_scan[0] = 0;
+		nr_force_scan[1] = SWAP_CLUSTER_MAX;
 		goto out;
 	}
@@ -1781,6 +1784,8 @@ static void get_scan_count(struct zone *zone, struct scan_control *sc,
 			fraction[0] = 1;
 			fraction[1] = 0;
 			denominator = 1;
+			nr_force_scan[0] = SWAP_CLUSTER_MAX;
+			nr_force_scan[1] = 0;
 			goto out;
 		}
 	}
@@ -1829,6 +1834,11 @@ static void get_scan_count(struct zone *zone, struct scan_control *sc,
 	fraction[0] = ap;
 	fraction[1] = fp;
 	denominator = ap + fp + 1;
+	if (force_scan) {
+		unsigned long scan = SWAP_CLUSTER_MAX;
+		nr_force_scan[0] = div64_u64(scan * ap, denominator);
+		nr_force_scan[1] = div64_u64(scan * fp, denominator);
+	}
 out:
 	for_each_evictable_lru(l) {
 		int file = is_file_lru(l);
@@ -1849,12 +1859,8 @@ out:
 		 * memcg, priority drop can cause big latency. So, it's better
 		 * to scan small amount. See may_noscan above.
 		 */
-		if (!scan && force_scan) {
-			if (file)
-				scan = SWAP_CLUSTER_MAX;
-			else if (!noswap)
-				scan = SWAP_CLUSTER_MAX;
-		}
+		if (!scan && force_scan)
+			scan = nr_force_scan[file];
 		nr[l] = scan;
 	}
 }
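
The added nr_force_scan[] pair precomputes how a forced scan of one batch (SWAP_CLUSTER_MAX pages) is split between the anon and file LRU lists, using the same ap:fp ratio that feeds fraction[] and denominator, instead of scanning a full SWAP_CLUSTER_MAX from each list as the removed branch did. A minimal userspace sketch of that arithmetic (hypothetical ap/fp values, plain 64-bit division standing in for the kernel's div64_u64 helper):

#include <stdint.h>
#include <stdio.h>

#define SWAP_CLUSTER_MAX 32UL	/* one reclaim batch, as in the kernel */

int main(void)
{
	/* Hypothetical anon/file pressure ratios; in get_scan_count()
	 * ap and fp are derived from the per-zone reclaim statistics. */
	uint64_t ap = 3, fp = 13;
	uint64_t denominator = ap + fp + 1;

	/* Proportional split of one forced-scan batch, mirroring the
	 * nr_force_scan[0]/nr_force_scan[1] computation in the patch
	 * (ordinary 64-bit division instead of div64_u64). */
	uint64_t force_anon = SWAP_CLUSTER_MAX * ap / denominator;
	uint64_t force_file = SWAP_CLUSTER_MAX * fp / denominator;

	printf("forced scan: anon=%llu file=%llu pages\n",
	       (unsigned long long)force_anon,
	       (unsigned long long)force_file);
	return 0;
}

With these example values the forced scan works out to 5 anon and 24 file pages, so the total stays within one batch rather than doubling up across both lists.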