diff options
| author | Rusty Russell <rusty@rustcorp.com.au> | 2009-03-13 14:49:46 +1030 | 
|---|---|---|
| committer | Rusty Russell <rusty@rustcorp.com.au> | 2009-03-13 14:49:46 +1030 | 
| commit | c69fc56de1df5769f2ec69c915c7ad5afe63804c (patch) | |
| tree | 18cc8d2ad5d6643edf8b73a3a7d26c55b2125d25 /kernel | |
| parent | d95c3578120e5bc4784069439f00ccb1b5f87717 (diff) | |
cpumask: use topology_core_cpumask/topology_thread_cpumask instead of cpu_core_map/cpu_sibling_map
Impact: cleanup
This is presumably what those definitions are for, and while all archs
define cpu_core_map/cpu_sibling_map, that's changing (e.g. x86 wants to
change it to a pointer).
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Diffstat (limited to 'kernel')
| -rw-r--r-- | kernel/sched.c | 8 | 
1 files changed, 4 insertions, 4 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index 0a76d0b6f21..5dabd80c3c1 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -7249,7 +7249,7 @@ cpu_to_core_group(int cpu, const struct cpumask *cpu_map,
 {
 	int group;
-	cpumask_and(mask, &per_cpu(cpu_sibling_map, cpu), cpu_map);
+	cpumask_and(mask, topology_thread_cpumask(cpu), cpu_map);
 	group = cpumask_first(mask);
 	if (sg)
 		*sg = &per_cpu(sched_group_core, group).sg;
@@ -7278,7 +7278,7 @@ cpu_to_phys_group(int cpu, const struct cpumask *cpu_map,
 	cpumask_and(mask, cpu_coregroup_mask(cpu), cpu_map);
 	group = cpumask_first(mask);
 #elif defined(CONFIG_SCHED_SMT)
-	cpumask_and(mask, &per_cpu(cpu_sibling_map, cpu), cpu_map);
+	cpumask_and(mask, topology_thread_cpumask(cpu), cpu_map);
 	group = cpumask_first(mask);
 #else
 	group = cpu;
@@ -7621,7 +7621,7 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
 		SD_INIT(sd, SIBLING);
 		set_domain_attribute(sd, attr);
 		cpumask_and(sched_domain_span(sd),
-			    &per_cpu(cpu_sibling_map, i), cpu_map);
+			    topology_thread_cpumask(i), cpu_map);
 		sd->parent = p;
 		p->child = sd;
 		cpu_to_cpu_group(i, cpu_map, &sd->groups, tmpmask);
@@ -7632,7 +7632,7 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
 	/* Set up CPU (sibling) groups */
 	for_each_cpu(i, cpu_map) {
 		cpumask_and(this_sibling_map,
-			    &per_cpu(cpu_sibling_map, i), cpu_map);
+			    topology_thread_cpumask(i), cpu_map);
 		if (i != cpumask_first(this_sibling_map))
 			continue;
