-rw-r--r--	kernel/sched_fair.c     | 9 ++++++++-
-rw-r--r--	kernel/sched_features.h | 9 ++++++++-
2 files changed, 16 insertions(+), 2 deletions(-)
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index a37f311f436..acf16a8d934 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -711,7 +711,7 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
 
 	if (!initial) {
 		/* sleeps upto a single latency don't count. */
-		if (sched_feat(NEW_FAIR_SLEEPERS)) {
+		if (sched_feat(FAIR_SLEEPERS)) {
 			unsigned long thresh = sysctl_sched_latency;
 
 			/*
@@ -725,6 +725,13 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
 					 task_of(se)->policy != SCHED_IDLE))
 				thresh = calc_delta_fair(thresh, se);
 
+			/*
+			 * Halve their sleep time's effect, to allow
+			 * for a gentler effect of sleepers:
+			 */
+			if (sched_feat(GENTLE_FAIR_SLEEPERS))
+				thresh >>= 1;
+
 			vruntime -= thresh;
 		}
 	}
diff --git a/kernel/sched_features.h b/kernel/sched_features.h
index 70115c69c7a..fd375675f83 100644
--- a/kernel/sched_features.h
+++ b/kernel/sched_features.h
@@ -3,7 +3,14 @@
  * considers the task to be running during that period. This gives it
  * a service deficit on wakeup, allowing it to run sooner.
  */
-SCHED_FEAT(NEW_FAIR_SLEEPERS, 0)
+SCHED_FEAT(FAIR_SLEEPERS, 1)
+
+/*
+ * Only give sleepers 50% of their service deficit. This allows
+ * them to run sooner, but does not allow tons of sleepers to
+ * rip the spread apart.
+ */
+SCHED_FEAT(GENTLE_FAIR_SLEEPERS, 1)
 
 /*
  * By not normalizing the sleep time, heavy tasks get an effective
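
For illustration, the combined effect of the two flags can be sketched as a
small standalone C program. This is a minimal sketch, not the kernel code:
sleeper_credit(), the feat_* variables, and the 6 ms default are hypothetical
stand-ins, and the NORMALIZED_SLEEPER scaling from the patch is elided. The
point it demonstrates is the one in the new comment above: a waking sleeper
is credited up to one scheduling latency of vruntime, and
GENTLE_FAIR_SLEEPERS halves that credit so a flood of sleepers cannot rip
the spread apart.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for the kernel's feature flags and latency knob. */
static bool feat_fair_sleepers = true;        /* SCHED_FEAT(FAIR_SLEEPERS, 1) */
static bool feat_gentle_fair_sleepers = true; /* SCHED_FEAT(GENTLE_FAIR_SLEEPERS, 1) */
static unsigned long sysctl_sched_latency = 6000000UL; /* assumed 6 ms, in ns */

/*
 * How much vruntime credit a waking sleeper receives: up to one full
 * latency period, halved when the gentle variant is enabled.
 */
static unsigned long sleeper_credit(void)
{
	unsigned long thresh = 0;

	if (feat_fair_sleepers) {
		thresh = sysctl_sched_latency;

		/* Halve the sleep time's effect, per GENTLE_FAIR_SLEEPERS. */
		if (feat_gentle_fair_sleepers)
			thresh >>= 1;
	}
	return thresh;
}

int main(void)
{
	/* With both flags on, a sleeper is placed latency/2 = 3 ms back,
	 * i.e. place_entity() would do: vruntime -= sleeper_credit(); */
	printf("sleeper credit: %lu ns\n", sleeper_credit());
	return 0;
}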