Diffstat (limited to 'mm/vmscan.c')
 mm/vmscan.c | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index b7ed3767564..a1ce17f44be 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1752,7 +1752,7 @@ out:
 /* Use reclaim/compaction for costly allocs or under memory pressure */
 static bool in_reclaim_compaction(struct scan_control *sc)
 {
-	if (COMPACTION_BUILD && sc->order &&
+	if (IS_ENABLED(CONFIG_COMPACTION) && sc->order &&
 			(sc->order > PAGE_ALLOC_COSTLY_ORDER ||
 			 sc->priority < DEF_PRIORITY - 2))
 		return true;
@@ -2005,7 +2005,7 @@ static bool shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
 			if (zone->all_unreclaimable &&
 					sc->priority != DEF_PRIORITY)
 				continue;	/* Let kswapd poll it */
-			if (COMPACTION_BUILD) {
+			if (IS_ENABLED(CONFIG_COMPACTION)) {
 				/*
 				 * If we already have plenty of memory free for
 				 * compaction in this zone, don't free any more.
@@ -2421,7 +2421,8 @@ static bool zone_balanced(struct zone *zone, int order,
 				    balance_gap, classzone_idx, 0))
 		return false;
 
-	if (COMPACTION_BUILD && order && !compaction_suitable(zone, order))
+	if (IS_ENABLED(CONFIG_COMPACTION) && order &&
+	    !compaction_suitable(zone, order))
 		return false;
 
 	return true;
@@ -2684,7 +2685,7 @@ loop_again:
 			 * Do not reclaim more than needed for compaction.
 			 */
 			testorder = order;
-			if (COMPACTION_BUILD && order &&
+			if (IS_ENABLED(CONFIG_COMPACTION) && order &&
 					compaction_suitable(zone, order) !=
 						COMPACT_SKIPPED)
 				testorder = 0;
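
All four hunks make the same mechanical substitution: the mm-private COMPACTION_BUILD
constant is replaced by the generic IS_ENABLED() helper, which evaluates to 1 when a
config option is built in (or modular) and to 0 otherwise. What follows is a minimal
user-space sketch of the argument-counting preprocessor trick behind IS_ENABLED(),
modeled on the 3.7-era include/linux/kconfig.h; CONFIG_COMPACTION is stubbed by hand
and CONFIG_FOO is a made-up disabled option, since a standalone program has no
generated Kconfig output to include.

	#include <stdio.h>

	/* Kconfig normally generates these; stub one option on, leave one off. */
	#define CONFIG_COMPACTION 1
	/* CONFIG_FOO deliberately undefined: a made-up, disabled option. */

	/*
	 * Argument-counting trick modeled on the kernel's kconfig.h: when the
	 * option expands to 1, __ARG_PLACEHOLDER_1 pastes in "0," and shifts a
	 * literal 1 into the second-argument slot; any other token leaves the
	 * fallback 0 selected.  (The kernel's IS_ENABLED() additionally ORs in
	 * option##_MODULE to cover tristate "m" options; omitted here.)
	 */
	#define __ARG_PLACEHOLDER_1 0,
	#define __take_second_arg(__ignored, val, ...) val
	#define __is_defined(x)              ___is_defined(x)
	#define ___is_defined(val)           ____is_defined(__ARG_PLACEHOLDER_##val)
	#define ____is_defined(arg1_or_junk) __take_second_arg(arg1_or_junk 1, 0)
	#define IS_ENABLED(option)           __is_defined(option)

	int main(void)
	{
		/*
		 * Unlike #ifdef, this is an ordinary C expression: the guarded
		 * code is always parsed and type-checked, and the compiler
		 * discards it as dead code when the option is off.
		 */
		if (IS_ENABLED(CONFIG_COMPACTION))
			printf("compaction path compiled in\n");

		if (IS_ENABLED(CONFIG_FOO))
			printf("never printed: CONFIG_FOO is off\n");

		return 0;
	}

That dead-code-elimination property is the point of the conversion: with
if (IS_ENABLED(...)) instead of an #ifdef block, both configurations of the code keep
compiling on every build, so breakage in a config-dependent branch surfaces immediately
rather than only on kernels built with the option enabled.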