author		Peter Zijlstra <a.p.zijlstra@chello.nl>	2009-12-03 18:00:07 +0100
committer	Greg Kroah-Hartman <gregkh@suse.de>	2010-08-10 10:20:35 -0700
commit		4e4a55ada236bf7d10669a2732c6f964bcfdba8d (patch)
tree		0a71a7e3339da67b770f396fca9011a8648081ca /kernel
parent		f23dc93c0e6772577309e150daf1906dbfd9d43b (diff)
sched: cgroup: Implement different treatment for idle shares
commit cd8ad40de36c2fe75f3b731bd70198b385895246 upstream.
When setting the weight for a per-cpu task-group, we have to put in a
phantom weight when there is no work on that cpu; otherwise we'll not
service that cpu when new work gets placed there until we again update
the per-cpu weights.

We used to add these phantom weights to the total so that the idle
per-cpu shares don't get inflated; this, however, causes the non-idle
parts to get deflated, resulting in unexpected weight distributions.
Reverse this, so that the non-idle shares are correct but the idle
shares are inflated.
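To make the arithmetic concrete, here is a minimal userspace sketch (not
kernel code) of the share split before and after this patch. It assumes a
group weight of 1024, a NICE_0_LOAD of 1024, and four cpus of which two are
idle, and it uses the simple proportional split (group weight * per-cpu
weight / total weight) that tg_shares_up() feeds into the per-cpu share
computation; the values and names are illustrative only.

	#include <stdio.h>

	#define NR_CPUS     4
	#define NICE_0_LOAD 1024UL	/* nice-0 task weight */
	#define TG_SHARES   1024UL	/* assumed group weight, illustrative */

	int main(void)
	{
		/* per-cpu group load: two busy cpus, two idle ones */
		unsigned long load[NR_CPUS] = { 1024, 1024, 0, 0 };
		unsigned long rq_weight = 0, sum_weight = 0;
		int i;

		for (i = 0; i < NR_CPUS; i++) {
			unsigned long weight = load[i];

			rq_weight += weight;		/* new total: real load only */
			if (!weight)
				weight = NICE_0_LOAD;	/* phantom weight for idle cpus */
			sum_weight += weight;		/* old total: real + phantom */
		}
		if (!rq_weight)				/* fully idle group: fall back */
			rq_weight = sum_weight;

		for (i = 0; i < NR_CPUS; i++) {
			unsigned long weight = load[i] ? load[i] : NICE_0_LOAD;

			printf("cpu%d: old share %4lu, new share %4lu\n", i,
			       TG_SHARES * weight / sum_weight,
			       TG_SHARES * weight / rq_weight);
		}
		return 0;
	}

With the old total (sum_weight, 4096 here) the two busy cpus are deflated
from 512 to 256 because the phantom weight of the idle cpus dilutes the
denominator; with the real total (rq_weight, 2048) the busy cpus get their
correct 512 each, and only the idle cpus' shares are inflated to 512.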
Reported-by: Yasunori Goto <y-goto@jp.fujitsu.com>
Tested-by: Yasunori Goto <y-goto@jp.fujitsu.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <1257934048.23203.76.camel@twins>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/sched.c	8
1 file changed, 6 insertions(+), 2 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index a0157c72c8f..d0958da992d 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1623,7 +1623,7 @@ static void update_group_shares_cpu(struct task_group *tg, int cpu,
  */
 static int tg_shares_up(struct task_group *tg, void *data)
 {
-	unsigned long weight, rq_weight = 0, shares = 0;
+	unsigned long weight, rq_weight = 0, sum_weight = 0, shares = 0;
 	unsigned long *usd_rq_weight;
 	struct sched_domain *sd = data;
 	unsigned long flags;
@@ -1639,6 +1639,7 @@ static int tg_shares_up(struct task_group *tg, void *data)
 		weight = tg->cfs_rq[i]->load.weight;
 		usd_rq_weight[i] = weight;
 
+		rq_weight += weight;
 		/*
 		 * If there are currently no tasks on the cpu pretend there
 		 * is one of average load so that when a new task gets to
@@ -1647,10 +1648,13 @@ static int tg_shares_up(struct task_group *tg, void *data)
 		if (!weight)
 			weight = NICE_0_LOAD;
 
-		rq_weight += weight;
+		sum_weight += weight;
 		shares += tg->cfs_rq[i]->shares;
 	}
 
+	if (!rq_weight)
+		rq_weight = sum_weight;
+
 	if ((!shares && rq_weight) || shares > tg->shares)
 		shares = tg->shares;