author     Peter Zijlstra <a.p.zijlstra@chello.nl>    2010-04-16 14:59:29 +0200
committer  Greg Kroah-Hartman <gregkh@suse.de>        2010-09-20 13:18:11 -0700
commit     6efd9bbce0d4b02d295f28054caa74e6edf811b7
tree       8b10422793dacadd0b91a40af1c601d3080a1bf3 /kernel
parent     ac8f51da79873b84461e77ee2b19d02a253add3e
sched: Pre-compute cpumask_weight(sched_domain_span(sd))
commit 669c55e9f99b90e46eaa0f98a67ec53d46dc969a upstream
Dave reported that his large SPARC machines spend lots of time in
hweight64(); try to optimize away some of those needless cpumask_weight()
invocations (with the large offstack cpumasks, these are very expensive
indeed).
Reported-by: David Miller <davem@davemloft.net>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Mike Galbraith <efault@gmx.de>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
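For readers outside the scheduler tree, the pattern here is simply to hoist a per-call population count over a potentially large CPU bitmask into a field that is filled in once, when the domain hierarchy is attached. The sketch below is a minimal userspace illustration of that idea, not kernel code: struct dom, MASK_LONGS, mask_weight() and attach_domain() are hypothetical stand-ins for struct sched_domain, its span cpumask, cpumask_weight() and cpu_attach_domain().

/* Minimal sketch of "precompute the mask weight once" (illustrative names,
 * not the kernel's types). Build with: cc -O2 sketch.c
 */
#include <stdio.h>

#define MASK_LONGS 64                          /* 64 * 64 bits = 4096 "CPUs" on LP64 */

struct dom {
        unsigned long span[MASK_LONGS];        /* which CPUs this level covers */
        unsigned int  span_weight;             /* cached popcount of span */
        struct dom   *parent;
};

/* The per-call cost the patch avoids: a popcount over the whole mask. */
static unsigned int mask_weight(const unsigned long *mask)
{
        unsigned int i, w = 0;

        for (i = 0; i < MASK_LONGS; i++)
                w += (unsigned int)__builtin_popcountl(mask[i]);
        return w;
}

/* Analogue of the hunk added to cpu_attach_domain(): walk the parent
 * chain once and cache each level's weight.
 */
static void attach_domain(struct dom *sd)
{
        struct dom *tmp;

        for (tmp = sd; tmp; tmp = tmp->parent)
                tmp->span_weight = mask_weight(tmp->span);
}

int main(void)
{
        static struct dom top, child = { .parent = &top };
        unsigned int i;

        child.span[0] = 0xffUL;                /* child spans 8 CPUs */
        for (i = 0; i < MASK_LONGS; i++)
                top.span[i] = ~0UL;            /* parent spans all of them */

        attach_domain(&child);

        /* Hot paths now read the cached field instead of recomputing it. */
        printf("child weight %u, parent weight %u\n",
               child.span_weight, top.span_weight);
        return 0;
}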
Diffstat (limited to 'kernel')
 kernel/sched.c      | 7
 kernel/sched_fair.c | 8
 2 files changed, 8 insertions(+), 7 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 7aed676da07..4d8a9c73f04 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -3678,7 +3678,7 @@ unsigned long __weak arch_scale_freq_power(struct sched_domain *sd, int cpu)
 
 unsigned long default_scale_smt_power(struct sched_domain *sd, int cpu)
 {
-        unsigned long weight = cpumask_weight(sched_domain_span(sd));
+        unsigned long weight = sd->span_weight;
         unsigned long smt_gain = sd->smt_gain;
 
         smt_gain /= weight;
@@ -3711,7 +3711,7 @@ unsigned long scale_rt_power(int cpu)
 
 static void update_cpu_power(struct sched_domain *sd, int cpu)
 {
-        unsigned long weight = cpumask_weight(sched_domain_span(sd));
+        unsigned long weight = sd->span_weight;
         unsigned long power = SCHED_LOAD_SCALE;
         struct sched_group *sdg = sd->groups;
 
@@ -8166,6 +8166,9 @@ cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
         struct rq *rq = cpu_rq(cpu);
         struct sched_domain *tmp;
 
+        for (tmp = sd; tmp; tmp = tmp->parent)
+                tmp->span_weight = cpumask_weight(sched_domain_span(tmp));
+
         /* Remove the sched domains which do not contribute to scheduling. */
         for (tmp = sd; tmp; ) {
                 struct sched_domain *parent = tmp->parent;
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 76d9dc0d6ba..9404d6a92e5 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1520,9 +1520,7 @@ select_task_rq_fair(struct rq *rq, struct task_struct *p, int sd_flag, int wake_
                  * Pick the largest domain to update shares over
                  */
                 tmp = sd;
-                if (affine_sd && (!tmp ||
-                                  cpumask_weight(sched_domain_span(affine_sd)) >
-                                  cpumask_weight(sched_domain_span(sd))))
+                if (affine_sd && (!tmp || affine_sd->span_weight > sd->span_weight))
                         tmp = affine_sd;
 
                 if (tmp) {
@@ -1566,10 +1564,10 @@ select_task_rq_fair(struct rq *rq, struct task_struct *p, int sd_flag, int wake_
 
                 /* Now try balancing at a lower domain level of new_cpu */
                 cpu = new_cpu;
-                weight = cpumask_weight(sched_domain_span(sd));
+                weight = sd->span_weight;
                 sd = NULL;
                 for_each_domain(cpu, tmp) {
-                        if (weight <= cpumask_weight(sched_domain_span(tmp)))
+                        if (weight <= tmp->span_weight)
                                 break;
                         if (tmp->flags & sd_flag)
                                 sd = tmp;
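Placing the precompute loop in cpu_attach_domain() is the natural choice: that is where a freshly built (or rebuilt) domain hierarchy gets hooked up to a CPU's runqueue, so a single walk up tmp->parent fills span_weight for every level before any of the readers above (default_scale_smt_power(), update_cpu_power(), the wakeup path in select_task_rq_fair()) can see the domain, and a later topology rebuild refreshes the cached values through the same path.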