author     Peter Zijlstra <a.p.zijlstra@chello.nl>   2008-06-27 13:41:38 +0200
committer  Ingo Molnar <mingo@elte.hu>               2008-06-27 14:31:47 +0200
commit     f1d239f73200a5803a89e5929fb3abc1596b7589 (patch)
tree       33c59b6f2621284af91825ea7fbab718ffe65ade
parent     83378269a5fad98f562ebc0f09c349575e6cbfe1 (diff)
sched: incremental effective_load()
Increase the accuracy of the effective_load values.

Not only consider the current increment (as per the attempted wakeup),
but also consider the delta between when we last adjusted the shares
and the current situation.

Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Cc: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
Cc: Mike Galbraith <efault@gmx.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
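(For illustration only; the sketch below is not part of the commit.) A
minimal userspace C rendering of the incremental idea: fold the weight
delta that accumulated since the shares were last recomputed
(load.weight - rq_weight) into both the wakeup increment wl and the group
increment wg before walking up the hierarchy. The struct and function
names here are invented for the sketch.

#include <stdio.h>

/* Simplified stand-in for the kernel's cfs_rq. */
struct cfs_rq_stub {
	long load_weight;	/* current queue weight (load.weight) */
	long rq_weight;		/* weight when shares were last set   */
};

/* Fold the pending weight delta into both increments, as the patch
 * does at the top of effective_load().
 */
static void fold_pending_delta(const struct cfs_rq_stub *q, long *wl, long *wg)
{
	long more_w = q->load_weight - q->rq_weight;

	*wl += more_w;
	*wg += more_w;
}

int main(void)
{
	struct cfs_rq_stub q = { .load_weight = 3072, .rq_weight = 2048 };
	long wl = 1024, wg = 1024;	/* the attempted wakeup's weight */

	fold_pending_delta(&q, &wl, &wg);
	printf("wl=%ld wg=%ld\n", wl, wg);	/* prints: wl=2048 wg=2048 */
	return 0;
}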
-rw-r--r--  kernel/sched.c       |  6 ++++++
-rw-r--r--  kernel/sched_fair.c  | 18 +++++++++++++++---
2 files changed, 21 insertions(+), 3 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 01d3e51b711..7613f69f097 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -427,6 +427,11 @@ struct cfs_rq {
* this cpu's part of tg->shares
*/
unsigned long shares;
+
+ /*
+ * load.weight at the time we set shares
+ */
+ unsigned long rq_weight;
#endif
#endif
};
@@ -1527,6 +1532,7 @@ __update_group_shares_cpu(struct task_group *tg, int cpu,
* record the actual number of shares, not the boosted amount.
*/
tg->cfs_rq[cpu]->shares = boost ? 0 : shares;
+ tg->cfs_rq[cpu]->rq_weight = rq_weight;
if (shares < MIN_SHARES)
shares = MIN_SHARES;
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index bed2f71e63d..e87f1a52f62 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1074,10 +1074,22 @@ static inline int wake_idle(int cpu, struct task_struct *p)
static const struct sched_class fair_sched_class;
#ifdef CONFIG_FAIR_GROUP_SCHED
-static unsigned long effective_load(struct task_group *tg, int cpu,
- unsigned long wl, unsigned long wg)
+static long effective_load(struct task_group *tg, int cpu,
+ long wl, long wg)
{
struct sched_entity *se = tg->se[cpu];
+ long more_w;
+
+ if (!tg->parent)
+ return wl;
+
+ /*
+ * In addition to the current increment, also add the
+ * difference between the weight when the shares were
+ * last updated and the current weight.
+ */
+ more_w = se->my_q->load.weight - se->my_q->rq_weight;
+ wl += more_w;
+ wg += more_w;
for_each_sched_entity(se) {
#define D(n) (likely(n) ? (n) : 1)
@@ -1086,7 +1098,7 @@ static unsigned long effective_load(struct task_group *tg, int cpu,
S = se->my_q->tg->shares;
s = se->my_q->shares;
- rw = se->my_q->load.weight;
+ rw = se->my_q->rq_weight;
a = S*(rw + wl);
b = S*rw + s*wg;
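(The hunk is truncated above; what follows is a hedged sketch of the
per-level arithmetic it feeds, not the literal continuation of the patch.)
Assuming per-cpu shares track runqueue weight proportionally, s = S*rw/RW,
adding wl to this queue's weight and wg to the total weight gives new
shares s' = S*(rw+wl)/(RW+wg) = s*a/b, so the weight change visible one
level up is s*(a-b)/b. The helper name below is hypothetical:

/* One loop iteration as standalone C. Substituting RW = S*rw/s shows
 * s' = S*(rw+wl)/(RW+wg) = s*a/b, hence a delta of s*(a-b)/b.
 */
static long propagate_one_level(long S, long s, long rw, long wl, long *wg)
{
	long a = S * (rw + wl);
	long b = S * rw + s * *wg;

	if (!b)		/* mirrors the D(n) divide-by-zero guard */
		b = 1;

	*wg = 0;	/* shifting shares leaves total group weight intact */
	return s * (a - b) / b;	/* weight delta seen by the parent level */
}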