Diffstat (limited to 'include/linux/compaction.h')
-rw-r--r--  include/linux/compaction.h | 66 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++--------
1 file changed, 58 insertions(+), 8 deletions(-)
diff --git a/include/linux/compaction.h b/include/linux/compaction.h
index 5ac51552d90..01e3132820d 100644
--- a/include/linux/compaction.h
+++ b/include/linux/compaction.h
@@ -21,7 +21,11 @@ extern int sysctl_extfrag_handler(struct ctl_table *table, int write,
extern int fragmentation_index(struct zone *zone, unsigned int order);
extern unsigned long try_to_compact_pages(struct zonelist *zonelist,
- int order, gfp_t gfp_mask, nodemask_t *mask);
+ int order, gfp_t gfp_mask, nodemask_t *mask,
+ enum migrate_mode mode, bool *contended);
+extern void compact_pgdat(pg_data_t *pgdat, int order);
+extern void reset_isolation_suitable(pg_data_t *pgdat);
+extern unsigned long compaction_suitable(struct zone *zone, int order);
/* Do not skip compaction more than 64 times */
#define COMPACT_MAX_DEFER_SHIFT 6
@@ -31,41 +35,87 @@ extern unsigned long try_to_compact_pages(struct zonelist *zonelist,
 * allocation success. 1 << compact_defer_shift compactions are skipped up
* to a limit of 1 << COMPACT_MAX_DEFER_SHIFT
*/
-static inline void defer_compaction(struct zone *zone)
+static inline void defer_compaction(struct zone *zone, int order)
{
zone->compact_considered = 0;
zone->compact_defer_shift++;
+ if (order < zone->compact_order_failed)
+ zone->compact_order_failed = order;
+
if (zone->compact_defer_shift > COMPACT_MAX_DEFER_SHIFT)
zone->compact_defer_shift = COMPACT_MAX_DEFER_SHIFT;
}
/* Returns true if compaction should be skipped this time */
-static inline bool compaction_deferred(struct zone *zone)
+static inline bool compaction_deferred(struct zone *zone, int order)
{
unsigned long defer_limit = 1UL << zone->compact_defer_shift;
+ if (order < zone->compact_order_failed)
+ return false;
+
/* Avoid possible overflow */
if (++zone->compact_considered > defer_limit)
zone->compact_considered = defer_limit;
- return zone->compact_considered < (1UL << zone->compact_defer_shift);
+ return zone->compact_considered < defer_limit;
+}
+
+/*
+ * Update defer tracking counters after successful compaction of given order,
+ * which means an allocation either succeeded (alloc_success == true) or is
+ * expected to succeed.
+ */
+static inline void compaction_defer_reset(struct zone *zone, int order,
+ bool alloc_success)
+{
+ if (alloc_success) {
+ zone->compact_considered = 0;
+ zone->compact_defer_shift = 0;
+ }
+ if (order >= zone->compact_order_failed)
+ zone->compact_order_failed = order + 1;
+}
+
+/* Returns true if restarting compaction after many failures */
+static inline bool compaction_restarting(struct zone *zone, int order)
+{
+ if (order < zone->compact_order_failed)
+ return false;
+
+ return zone->compact_defer_shift == COMPACT_MAX_DEFER_SHIFT &&
+ zone->compact_considered >= 1UL << zone->compact_defer_shift;
}
#else
static inline unsigned long try_to_compact_pages(struct zonelist *zonelist,
- int order, gfp_t gfp_mask, nodemask_t *nodemask)
+ int order, gfp_t gfp_mask, nodemask_t *nodemask,
+ enum migrate_mode mode, bool *contended)
{
return COMPACT_CONTINUE;
}
-static inline void defer_compaction(struct zone *zone)
+static inline void compact_pgdat(pg_data_t *pgdat, int order)
+{
+}
+
+static inline void reset_isolation_suitable(pg_data_t *pgdat)
+{
+}
+
+static inline unsigned long compaction_suitable(struct zone *zone, int order)
+{
+ return COMPACT_SKIPPED;
+}
+
+static inline void defer_compaction(struct zone *zone, int order)
{
}
-static inline bool compaction_deferred(struct zone *zone)
+static inline bool compaction_deferred(struct zone *zone, int order)
{
- return 1;
+ return true;
}
#endif /* CONFIG_COMPACTION */
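
To illustrate the defer bookkeeping these helpers implement, here is a minimal
userspace sketch (not kernel code) that mirrors the three inline functions from
the patch. The mock struct zone below carries only the counters the helpers
touch; COMPACT_MAX_DEFER_SHIFT matches the header's value of 6, and main() is
hypothetical scaffolding added purely for the demonstration.

#include <stdbool.h>
#include <stdio.h>

#define COMPACT_MAX_DEFER_SHIFT 6      /* do not skip more than 64 times */

struct zone {
        unsigned int compact_considered;
        unsigned int compact_defer_shift;
        int compact_order_failed;
};

/* Mirrors defer_compaction(): called when compaction fails at @order. */
static void defer_compaction(struct zone *zone, int order)
{
        zone->compact_considered = 0;
        zone->compact_defer_shift++;

        if (order < zone->compact_order_failed)
                zone->compact_order_failed = order;

        if (zone->compact_defer_shift > COMPACT_MAX_DEFER_SHIFT)
                zone->compact_defer_shift = COMPACT_MAX_DEFER_SHIFT;
}

/* Mirrors compaction_deferred(): true if this attempt should be skipped. */
static bool compaction_deferred(struct zone *zone, int order)
{
        unsigned long defer_limit = 1UL << zone->compact_defer_shift;

        if (order < zone->compact_order_failed)
                return false;

        /* Avoid possible overflow */
        if (++zone->compact_considered > defer_limit)
                zone->compact_considered = defer_limit;

        return zone->compact_considered < defer_limit;
}

/* Mirrors compaction_defer_reset(): clears the backoff after success. */
static void compaction_defer_reset(struct zone *zone, int order,
                                   bool alloc_success)
{
        if (alloc_success) {
                zone->compact_considered = 0;
                zone->compact_defer_shift = 0;
        }
        if (order >= zone->compact_order_failed)
                zone->compact_order_failed = order + 1;
}

int main(void)
{
        struct zone z = { .compact_order_failed = 0 };
        int order = 3, attempt, skipped = 0;

        /* Three consecutive failures: the skip window doubles each time. */
        for (attempt = 1; attempt <= 3; attempt++) {
                defer_compaction(&z, order);
                printf("failure %d: next %lu attempts will be skipped\n",
                       attempt, (1UL << z.compact_defer_shift) - 1);
        }

        /* Count how many attempts are actually skipped before one runs. */
        while (compaction_deferred(&z, order))
                skipped++;
        printf("skipped %d attempts before retrying order-%d compaction\n",
               skipped, order);

        /* A success resets the backoff entirely. */
        compaction_defer_reset(&z, order, true);
        printf("after success: defer_shift=%u considered=%u\n",
               z.compact_defer_shift, z.compact_considered);
        return 0;
}

Running the sketch shows the exponential backoff: each failure doubles the
skip window until it caps at 1 << 6 = 64 attempts, while compact_order_failed
lets requests below the failed order bypass the backoff entirely, since
lower-order allocations are more likely to succeed where a higher order failed.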