path: root/kernel/sched_fair.c
author     Peter Zijlstra <a.p.zijlstra@chello.nl>  2009-11-12 15:55:28 +0100
committer  Greg Kroah-Hartman <gregkh@suse.de>      2010-09-20 13:18:10 -0700
commit     505afcbd47a8588251409191dddd7f888b555c6f (patch)
tree       423182c7b46df32d448056f2020252d89e8dfa46 /kernel/sched_fair.c
parent     0b88f2ba7caa2d0ff6e3521481a82be084ecdc7b (diff)
sched: Cleanup select_task_rq_fair()
commit a50bde5130f65733142b32975616427d0ea50856 upstream

Clean up the new affine to idle sibling bits while trying to grok them.
Should not have any functional differences.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Mike Galbraith <efault@gmx.de>
LKML-Reference: <20091112145610.832503781@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Mike Galbraith <efault@gmx.de>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
Diffstat (limited to 'kernel/sched_fair.c')
-rw-r--r--  kernel/sched_fair.c  73
1 file changed, 51 insertions(+), 22 deletions(-)
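At its core, the patch extracts the two-step idle-sibling search into a new helper, select_idle_sibling(). Before reading the hunks, the policy can be modeled outside the kernel. The sketch below is a hypothetical standalone rendering, not kernel code: pick_idle_sibling(), idle[], allowed[] and NCPUS are invented stand-ins for cpu_rq(i)->cfs.nr_running == 0, p->cpus_allowed and the sched_domain span.

	/*
	 * Hypothetical userspace model of the idle-sibling policy.
	 * idle[i]    stands in for cpu_rq(i)->cfs.nr_running == 0,
	 * allowed[i] for cpumask_test_cpu(i, &p->cpus_allowed),
	 * NCPUS      for the sched_domain span.
	 */
	#include <stdio.h>

	#define NCPUS 4

	static int pick_idle_sibling(const int idle[], const int allowed[],
				     int cpu, int prev_cpu, int target)
	{
		int i;

		/* An idle prev_cpu beats the waking cpu as a target. */
		if (target == cpu && idle[prev_cpu])
			target = prev_cpu;

		/* Otherwise, scan the domain for any allowed idle cpu. */
		if (target == -1 || target == cpu) {
			for (i = 0; i < NCPUS; i++) {
				if (allowed[i] && idle[i]) {
					target = i;
					break;
				}
			}
		}

		return target;
	}

	int main(void)
	{
		int idle[NCPUS]    = { 0, 1, 0, 1 };	/* cpus 1 and 3 idle */
		int allowed[NCPUS] = { 1, 1, 1, 1 };	/* no affinity limit */

		/* Waker on cpu 0, task last ran on (busy) cpu 2. */
		printf("target = %d\n", pick_idle_sibling(idle, allowed, 0, 2, 0));
		return 0;
	}

Run as-is, the example wakes a task on cpu 0 whose previous cpu 2 is busy, so the domain scan settles on idle cpu 1 -- the same outcome the kernel helper below would produce.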
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 15a3182baef..b5d2e72af4c 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1384,6 +1384,41 @@ find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
}
/*
+ * Try and locate an idle CPU in the sched_domain.
+ */
+static int
+select_idle_sibling(struct task_struct *p, struct sched_domain *sd, int target)
+{
+ int cpu = smp_processor_id();
+ int prev_cpu = task_cpu(p);
+ int i;
+
+ /*
+ * If this domain spans both cpu and prev_cpu (see the SD_WAKE_AFFINE
+ * test in select_task_rq_fair) and the prev_cpu is idle then that's
+ * always a better target than the current cpu.
+ */
+ if (target == cpu) {
+ if (!cpu_rq(prev_cpu)->cfs.nr_running)
+ target = prev_cpu;
+ }
+
+ /*
+ * Otherwise, iterate the domain and find an eligible idle cpu.
+ */
+ if (target == -1 || target == cpu) {
+ for_each_cpu_and(i, sched_domain_span(sd), &p->cpus_allowed) {
+ if (!cpu_rq(i)->cfs.nr_running) {
+ target = i;
+ break;
+ }
+ }
+ }
+
+ return target;
+}
+
+/*
* sched_balance_self: balance the current task (running on cpu) in domains
* that have the 'flag' flag set. In practice, this is SD_BALANCE_FORK and
* SD_BALANCE_EXEC.
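One detail worth pulling out of the hunk above: the helper replaces an open-coded scan (removed in the next hunk) that filtered p->cpus_allowed by hand inside for_each_cpu(), letting for_each_cpu_and() intersect the domain span with the task's affinity mask directly. Side by side, both fragments taken from this patch:

	/* old: manual affinity filter */
	for_each_cpu(i, sched_domain_span(tmp)) {
		if (!cpumask_test_cpu(i, &p->cpus_allowed))
			continue;
		if (!cpu_rq(i)->cfs.nr_running) {
			target = i;
			break;
		}
	}

	/* new: intersection handled by the iterator */
	for_each_cpu_and(i, sched_domain_span(sd), &p->cpus_allowed) {
		if (!cpu_rq(i)->cfs.nr_running) {
			target = i;
			break;
		}
	}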
@@ -1441,36 +1476,30 @@ select_task_rq_fair(struct rq *rq, struct task_struct *p, int sd_flag, int wake_
}
if (want_affine && (tmp->flags & SD_WAKE_AFFINE)) {
- int candidate = -1, i;
+ int target = -1;
+ /*
+ * If both cpu and prev_cpu are part of this domain,
+ * cpu is a valid SD_WAKE_AFFINE target.
+ */
if (cpumask_test_cpu(prev_cpu, sched_domain_span(tmp)))
- candidate = cpu;
+ target = cpu;
/*
- * Check for an idle shared cache.
+ * If there's an idle sibling in this domain, make that
+ * the wake_affine target instead of the current cpu.
+ *
+ * XXX: should we possibly do this outside of
+ * WAKE_AFFINE, in case the shared cache domain is
+ * smaller than the WAKE_AFFINE domain?
*/
- if (tmp->flags & SD_PREFER_SIBLING) {
- if (candidate == cpu) {
- if (!cpu_rq(prev_cpu)->cfs.nr_running)
- candidate = prev_cpu;
- }
-
- if (candidate == -1 || candidate == cpu) {
- for_each_cpu(i, sched_domain_span(tmp)) {
- if (!cpumask_test_cpu(i, &p->cpus_allowed))
- continue;
- if (!cpu_rq(i)->cfs.nr_running) {
- candidate = i;
- break;
- }
- }
- }
- }
+ if (tmp->flags & SD_PREFER_SIBLING)
+ target = select_idle_sibling(p, tmp, target);
- if (candidate >= 0) {
+ if (target >= 0) {
affine_sd = tmp;
want_affine = 0;
- cpu = candidate;
+ cpu = target;
}
}
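
For readers who prefer the result to the delta, the post-patch SD_WAKE_AFFINE branch of select_task_rq_fair() reassembles from the hunks above into the shape below. This is a paraphrase limited to that branch; the surrounding domain walk and the want_affine setup are elided.

	if (want_affine && (tmp->flags & SD_WAKE_AFFINE)) {
		int target = -1;

		/*
		 * If both cpu and prev_cpu are part of this domain,
		 * cpu is a valid SD_WAKE_AFFINE target.
		 */
		if (cpumask_test_cpu(prev_cpu, sched_domain_span(tmp)))
			target = cpu;

		/* Prefer an idle sibling over the current cpu. */
		if (tmp->flags & SD_PREFER_SIBLING)
			target = select_idle_sibling(p, tmp, target);

		if (target >= 0) {
			affine_sd = tmp;
			want_affine = 0;
			cpu = target;
		}
	}

The net effect is that the wake-affine decision now reads as three small steps -- validate the domain, look for an idle sibling, commit the target -- which is the cleanup the changelog promises.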