Diffstat (limited to 'arch/ia64/kernel/topology.c')
-rw-r--r--  arch/ia64/kernel/topology.c | 88
1 file changed, 57 insertions(+), 31 deletions(-)
diff --git a/arch/ia64/kernel/topology.c b/arch/ia64/kernel/topology.c
index a2484fc1a06..f295f9abba4 100644
--- a/arch/ia64/kernel/topology.c
+++ b/arch/ia64/kernel/topology.c
@@ -17,40 +17,57 @@
 #include <linux/kernel.h>
 #include <linux/mm.h>
 #include <linux/node.h>
+#include <linux/slab.h>
 #include <linux/init.h>
 #include <linux/bootmem.h>
 #include <linux/nodemask.h>
 #include <linux/notifier.h>
+#include <linux/export.h>
 #include <asm/mmzone.h>
 #include <asm/numa.h>
 #include <asm/cpu.h>
 
 static struct ia64_cpu *sysfs_cpus;
 
-int arch_register_cpu(int num)
+void arch_fix_phys_package_id(int num, u32 slot)
 {
-#if defined (CONFIG_ACPI) && defined (CONFIG_HOTPLUG_CPU)
+#ifdef CONFIG_SMP
+	if (cpu_data(num)->socket_id == -1)
+		cpu_data(num)->socket_id = slot;
+#endif
+}
+EXPORT_SYMBOL_GPL(arch_fix_phys_package_id);
+
+
+#ifdef CONFIG_HOTPLUG_CPU
+int __ref arch_register_cpu(int num)
+{
+#ifdef CONFIG_ACPI
 	/*
-	 * If CPEI can be re-targetted or if this is not
+	 * If CPEI can be re-targeted or if this is not
 	 * CPEI target, then it is hotpluggable
 	 */
 	if (can_cpei_retarget() || !is_cpu_cpei_target(num))
 		sysfs_cpus[num].cpu.hotpluggable = 1;
 	map_cpu_to_node(num, node_cpuid[num].nid);
 #endif
-
 	return register_cpu(&sysfs_cpus[num].cpu, num);
 }
+EXPORT_SYMBOL(arch_register_cpu);
 
-#ifdef CONFIG_HOTPLUG_CPU
-
-void arch_unregister_cpu(int num)
+void __ref arch_unregister_cpu(int num)
 {
 	unregister_cpu(&sysfs_cpus[num].cpu);
+#ifdef CONFIG_ACPI
 	unmap_cpu_from_node(num, cpu_to_node(num));
+#endif
 }
-EXPORT_SYMBOL(arch_register_cpu);
 EXPORT_SYMBOL(arch_unregister_cpu);
+#else
+static int __init arch_register_cpu(int num)
+{
+	return register_cpu(&sysfs_cpus[num].cpu, num);
+}
 #endif /*CONFIG_HOTPLUG_CPU*/
@@ -118,11 +135,11 @@ struct cpu_cache_info {
 	struct kobject kobj;
 };
 
-static struct cpu_cache_info	all_cpu_cache_info[NR_CPUS] __cpuinitdata;
+static struct cpu_cache_info	all_cpu_cache_info[NR_CPUS];
 #define LEAF_KOBJECT_PTR(x,y)    (&all_cpu_cache_info[x].cache_leaves[y])
 
 #ifdef CONFIG_SMP
-static void __cpuinit cache_shared_cpu_map_setup( unsigned int cpu,
+static void cache_shared_cpu_map_setup(unsigned int cpu,
 					struct cache_info * this_leaf)
 {
 	pal_cache_shared_info_t	csi;
@@ -157,7 +174,7 @@ static void __cpuinit cache_shared_cpu_map_setup( unsigned int cpu,
 			&csi) == PAL_STATUS_SUCCESS);
 }
 #else
-static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu,
+static void cache_shared_cpu_map_setup(unsigned int cpu,
 				struct cache_info * this_leaf)
 {
 	cpu_set(cpu, this_leaf->shared_cpu_map);
@@ -203,8 +220,9 @@ static ssize_t show_shared_cpu_map(struct cache_info *this_leaf, char *buf)
 	ssize_t	len;
 	cpumask_t shared_cpu_map;
 
-	cpus_and(shared_cpu_map, this_leaf->shared_cpu_map, cpu_online_map);
-	len = cpumask_scnprintf(buf, NR_CPUS+1, shared_cpu_map);
+	cpumask_and(&shared_cpu_map,
+				&this_leaf->shared_cpu_map, cpu_online_mask);
+	len = cpumask_scnprintf(buf, NR_CPUS+1, &shared_cpu_map);
 	len += sprintf(buf+len, "\n");
 	return len;
 }
@@ -257,7 +275,7 @@ static struct attribute * cache_default_attrs[] = {
 #define to_object(k) container_of(k, struct cache_info, kobj)
 #define to_attr(a) container_of(a, struct cache_attr, attr)
 
-static ssize_t cache_show(struct kobject * kobj, struct attribute * attr, char * buf)
+static ssize_t ia64_cache_show(struct kobject * kobj, struct attribute * attr, char * buf)
 {
 	struct cache_attr *fattr = to_attr(attr);
 	struct cache_info *this_leaf = to_object(kobj);
@@ -267,8 +285,8 @@ static ssize_t cache_show(struct kobject * kobj, struct attribute * attr, char *
 	return ret;
 }
 
-static struct sysfs_ops cache_sysfs_ops = {
-	.show   = cache_show
+static const struct sysfs_ops cache_sysfs_ops = {
+	.show   = ia64_cache_show
 };
 
 static struct kobj_type cache_ktype = {
@@ -280,7 +298,7 @@ static struct kobj_type cache_ktype_percpu_entry = {
 	.sysfs_ops	= &cache_sysfs_ops,
 };
 
-static void __cpuinit cpu_cache_sysfs_exit(unsigned int cpu)
+static void cpu_cache_sysfs_exit(unsigned int cpu)
 {
 	kfree(all_cpu_cache_info[cpu].cache_leaves);
 	all_cpu_cache_info[cpu].cache_leaves = NULL;
@@ -289,12 +307,12 @@ static void __cpuinit cpu_cache_sysfs_exit(unsigned int cpu)
 	return;
 }
 
-static int __cpuinit cpu_cache_sysfs_init(unsigned int cpu)
+static int cpu_cache_sysfs_init(unsigned int cpu)
 {
-	u64 i, levels, unique_caches;
+	unsigned long i, levels, unique_caches;
 	pal_cache_config_info_t cci;
 	int j;
-	s64 status;
+	long status;
 	struct cache_info *this_cache;
 	int num_cache_leaves = 0;
@@ -333,7 +351,7 @@ static int __cpuinit cpu_cache_sysfs_init(unsigned int cpu)
 }
 
 /* Add cache interface for CPU device */
-static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
+static int cache_add_dev(struct device *sys_dev)
 {
 	unsigned int cpu = sys_dev->id;
 	unsigned long i, j;
@@ -345,18 +363,22 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
 		return 0;
 
 	oldmask = current->cpus_allowed;
-	retval = set_cpus_allowed(current, cpumask_of_cpu(cpu));
+	retval = set_cpus_allowed_ptr(current, cpumask_of(cpu));
 	if (unlikely(retval))
 		return retval;
 
 	retval = cpu_cache_sysfs_init(cpu);
-	set_cpus_allowed(current, oldmask);
+	set_cpus_allowed_ptr(current, &oldmask);
 	if (unlikely(retval < 0))
 		return retval;
 
 	retval = kobject_init_and_add(&all_cpu_cache_info[cpu].kobj,
 				      &cache_ktype_percpu_entry, &sys_dev->kobj,
 				      "%s", "cache");
+	if (unlikely(retval < 0)) {
+		cpu_cache_sysfs_exit(cpu);
+		return retval;
+	}
 
 	for (i = 0; i < all_cpu_cache_info[cpu].num_cache_leaves; i++) {
 		this_object = LEAF_KOBJECT_PTR(cpu,i);
@@ -370,7 +392,7 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
 			}
 			kobject_put(&all_cpu_cache_info[cpu].kobj);
 			cpu_cache_sysfs_exit(cpu);
-			break;
+			return retval;
 		}
 		kobject_uevent(&(this_object->kobj), KOBJ_ADD);
 	}
@@ -379,7 +401,7 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
 }
 
 /* Remove cache interface for CPU device */
-static int __cpuinit cache_remove_dev(struct sys_device * sys_dev)
+static int cache_remove_dev(struct device *sys_dev)
 {
 	unsigned int cpu = sys_dev->id;
 	unsigned long i;
@@ -403,13 +425,13 @@
  * When a cpu is hot-plugged, do a check and initiate
  * cache kobject if necessary
  */
-static int __cpuinit cache_cpu_callback(struct notifier_block *nfb,
+static int cache_cpu_callback(struct notifier_block *nfb,
 		unsigned long action, void *hcpu)
 {
 	unsigned int cpu = (unsigned long)hcpu;
-	struct sys_device *sys_dev;
+	struct device *sys_dev;
 
-	sys_dev = get_cpu_sysdev(cpu);
+	sys_dev = get_cpu_device(cpu);
 	switch (action) {
 	case CPU_ONLINE:
 	case CPU_ONLINE_FROZEN:
@@ -423,7 +445,7 @@ static int cache_cpu_callback(struct notifier_block *nfb,
 	return NOTIFY_OK;
 }
 
-static struct notifier_block __cpuinitdata cache_cpu_notifier =
+static struct notifier_block cache_cpu_notifier =
 {
 	.notifier_call = cache_cpu_callback
 };
@@ -432,12 +454,16 @@ static int __init cache_sysfs_init(void)
 {
 	int i;
 
+	cpu_notifier_register_begin();
+
 	for_each_online_cpu(i) {
-		struct sys_device *sys_dev = get_cpu_sysdev((unsigned int)i);
+		struct device *sys_dev = get_cpu_device((unsigned int)i);
 		cache_add_dev(sys_dev);
 	}
 
-	register_hotcpu_notifier(&cache_cpu_notifier);
+	__register_hotcpu_notifier(&cache_cpu_notifier);
+
+	cpu_notifier_register_done();
 
 	return 0;
 }
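For context, the final hunk converts cache_sysfs_init() to the race-free CPU-hotplug
notifier registration scheme: cpu_notifier_register_begin()/cpu_notifier_register_done()
bracket both the walk over already-online CPUs and the locked __register_hotcpu_notifier()
call, so a CPU cannot come online between the walk and the registration and be missed by
both. Below is a minimal, self-contained sketch of that pattern as it looked in kernels of
that era (~3.15); the example_* names are hypothetical, and later kernels replaced this
API entirely with cpuhp_setup_state(), so this is illustrative only, not the patch's code.

/*
 * Hypothetical sketch of the race-free hotplug-notifier registration
 * pattern adopted above.  Not part of the patch; ~3.15-era API.
 */
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/notifier.h>

static int example_cpu_callback(struct notifier_block *nfb,
		unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;

	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		/* set up per-CPU state for 'cpu' here */
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		/* tear down per-CPU state for 'cpu' here */
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block example_cpu_notifier = {
	.notifier_call = example_cpu_callback
};

static int __init example_init(void)
{
	int i;

	/*
	 * Hold the hotplug-registration lock: no CPU can come or go
	 * between the initial walk below and the notifier registration,
	 * so no online event is missed or handled twice.
	 */
	cpu_notifier_register_begin();

	for_each_online_cpu(i) {
		/* initialize state for CPUs that are already online */
	}

	/* Locked variant: the caller already holds the lock taken above. */
	__register_hotcpu_notifier(&example_cpu_notifier);

	cpu_notifier_register_done();
	return 0;
}
device_initcall(example_init);

The plain register_hotcpu_notifier(), which the patch replaces, takes the same lock
internally, which is why the locked __register_hotcpu_notifier() variant must be used
inside the begin/done section.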
