From db6a5cef7f474a5bad476a31f4e4c69a69fab8b1 Mon Sep 17 00:00:00 2001
From: Satyam Sharma
Date: Tue, 2 Oct 2007 13:39:45 -0700
Subject: [IA64] tree-wide: Misc __cpu{initdata, init, exit} annotations

* palinfo.c:

  palinfo_cpu_notifier is a CPU hotplug notifier_block, and can be marked
  __cpuinitdata, and the callback function palinfo_cpu_callback() itself
  can be marked __cpuinit.

  create_palinfo_proc_entries() is only called from __cpuinit callback or
  general __init code, therefore a candidate for __cpuinit itself.

  remove_palinfo_proc_entries() is only called from __cpuinit callback or
  general __exit code, therefore a candidate for __cpuexit.

* salinfo.c:

  The CPU hotplug notifier_block can be __cpuinitdata. The callback
  salinfo_cpu_callback() is incorrectly marked __devinit -- it must be
  __cpuinit instead.

* topology.c:

  cache_sysfs_init() is only called at device_initcall() time so marking
  it as __cpuinit is wrong and wasteful. It should be unconditionally
  __init. Also cleanup reference to hotplug notifier callback function
  from this function and replace with cache_add_dev(), which could also
  enable us to use other tricks to replace __cpuinit{data} annotations,
  as recently discussed on this list.

  cache_shared_cpu_map_setup() is only ever called from __cpuinit-marked
  functions hence both its definitions (SMP or !SMP) are candidates for
  __cpuinit itself. Also all_cpu_cache_info can be __cpuinitdata because
  only referenced from __cpuinit code.

Signed-off-by: Satyam Sharma
Cc: "Luck, Tony"
Signed-off-by: Andrew Morton
Signed-off-by: Tony Luck
---
 arch/ia64/kernel/palinfo.c  |  6 +++---
 arch/ia64/kernel/salinfo.c  |  4 ++--
 arch/ia64/kernel/topology.c | 12 ++++++------
 3 files changed, 11 insertions(+), 11 deletions(-)

(limited to 'arch/ia64/kernel')

diff --git a/arch/ia64/kernel/palinfo.c b/arch/ia64/kernel/palinfo.c
index 85829e27785..6ef6ffb943a 100644
--- a/arch/ia64/kernel/palinfo.c
+++ b/arch/ia64/kernel/palinfo.c
@@ -907,7 +907,7 @@ palinfo_read_entry(char *page, char **start, off_t off, int count, int *eof, voi
         return len;
 }
 
-static void
+static void __cpuinit
 create_palinfo_proc_entries(unsigned int cpu)
 {
 #       define CPUSTR   "cpu%d"
@@ -968,7 +968,7 @@ remove_palinfo_proc_entries(unsigned int hcpu)
         }
 }
 
-static int palinfo_cpu_callback(struct notifier_block *nfb,
+static int __cpuinit palinfo_cpu_callback(struct notifier_block *nfb,
                                 unsigned long action, void *hcpu)
 {
         unsigned int hotcpu = (unsigned long)hcpu;
@@ -986,7 +986,7 @@ static int palinfo_cpu_callback(struct notifier_block *nfb,
         return NOTIFY_OK;
 }
 
-static struct notifier_block palinfo_cpu_notifier =
+static struct notifier_block palinfo_cpu_notifier __cpuinitdata =
 {
         .notifier_call = palinfo_cpu_callback,
         .priority = 0,
diff --git a/arch/ia64/kernel/salinfo.c b/arch/ia64/kernel/salinfo.c
index 25cd75f50ab..779c3cca206 100644
--- a/arch/ia64/kernel/salinfo.c
+++ b/arch/ia64/kernel/salinfo.c
@@ -574,7 +574,7 @@ static const struct file_operations salinfo_data_fops = {
         .write   = salinfo_log_write,
 };
 
-static int __devinit
+static int __cpuinit
 salinfo_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu)
 {
         unsigned int i, cpu = (unsigned long)hcpu;
@@ -615,7 +615,7 @@ salinfo_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu
         return NOTIFY_OK;
 }
 
-static struct notifier_block salinfo_cpu_notifier =
+static struct notifier_block salinfo_cpu_notifier __cpuinitdata =
 {
         .notifier_call = salinfo_cpu_callback,
         .priority = 0,
diff --git a/arch/ia64/kernel/topology.c b/arch/ia64/kernel/topology.c
index 94ae3c87d82..14261fee5f4 100644
--- a/arch/ia64/kernel/topology.c
+++ b/arch/ia64/kernel/topology.c
@@ -118,11 +118,11 @@ struct cpu_cache_info {
         struct kobject kobj;
 };
 
-static struct cpu_cache_info all_cpu_cache_info[NR_CPUS];
+static struct cpu_cache_info all_cpu_cache_info[NR_CPUS] __cpuinitdata;
 #define LEAF_KOBJECT_PTR(x,y)    (&all_cpu_cache_info[x].cache_leaves[y])
 
 #ifdef CONFIG_SMP
-static void cache_shared_cpu_map_setup( unsigned int cpu,
+static void __cpuinit cache_shared_cpu_map_setup( unsigned int cpu,
                 struct cache_info * this_leaf)
 {
         pal_cache_shared_info_t csi;
@@ -157,7 +157,7 @@ static void cache_shared_cpu_map_setup( unsigned int cpu,
                         &csi) == PAL_STATUS_SUCCESS);
 }
 #else
-static void cache_shared_cpu_map_setup(unsigned int cpu,
+static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu,
                 struct cache_info * this_leaf)
 {
         cpu_set(cpu, this_leaf->shared_cpu_map);
@@ -428,13 +428,13 @@ static struct notifier_block __cpuinitdata cache_cpu_notifier =
         .notifier_call = cache_cpu_callback
 };
 
-static int __cpuinit cache_sysfs_init(void)
+static int __init cache_sysfs_init(void)
 {
         int i;
 
         for_each_online_cpu(i) {
-                cache_cpu_callback(&cache_cpu_notifier, CPU_ONLINE,
-                                (void *)(long)i);
+                struct sys_device *sys_dev = get_cpu_sysdev((unsigned int)i);
+                cache_add_dev(sys_dev);
         }
 
         register_hotcpu_notifier(&cache_cpu_notifier);
-- cgit v1.2.3-18-g5258

From 8e75ad8908bd653191b19347ecccb4078cb17b74 Mon Sep 17 00:00:00 2001
From: Satyam Sharma
Date: Tue, 2 Oct 2007 13:39:46 -0700
Subject: [IA64] perfmon: Remove exit_pfm_fs()

Because it is dead code and not referenced by anybody else (that file
cannot be built modular).

Signed-off-by: Satyam Sharma
Cc: "Luck, Tony"
Cc: Stephane Eranian
Signed-off-by: Andrew Morton
Signed-off-by: Tony Luck
---
 arch/ia64/kernel/perfmon.c | 7 -------
 1 file changed, 7 deletions(-)

(limited to 'arch/ia64/kernel')

diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index 14b8e5a6222..f55fa07849c 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -1538,13 +1538,6 @@ init_pfm_fs(void)
         return err;
 }
 
-static void __exit
-exit_pfm_fs(void)
-{
-        unregister_filesystem(&pfm_fs_type);
-        mntput(pfmfs_mnt);
-}
-
 static ssize_t
 pfm_read(struct file *filp, char __user *buf, size_t size, loff_t *ppos)
 {
-- cgit v1.2.3-18-g5258

From 2bc5c282999af41042c2b703bf3a58ca1d7e3ee2 Mon Sep 17 00:00:00 2001
From: Russ Anderson
Date: Thu, 20 Sep 2007 13:59:12 -0500
Subject: [IA64] Remove needless delay in MCA rendezvous

While testing the MCA recovery code, we noticed that some machines would
have a five second delay rendezvousing cpus. What was happening is that
ia64_wait_for_slaves() would check to see if all the slave CPUs had
rendezvoused. If any had not, it would wait 1 millisecond then check
again. If any CPUs had still not rendezvoused, it would wait 5 seconds
before checking again.

On some configs the rendezvous takes more than 1 millisecond, causing
the code to wait the full 5 seconds, even though the last CPU
rendezvoused after only a few milliseconds.

The fix is to check every 1 millisecond to see if all the cpus have
rendezvoused. After 5 seconds the code concludes the CPUs will never
rendezvous (same as before).

The MCA code is, by definition, not performance critical, but a needless
delay of 5 seconds is senseless. The 5 seconds also adds up quickly when
running the error injection code in a loop. This patch both simplifies
the code and removes the needless delay.

Signed-off-by: Russ Anderson
Signed-off-by: Tony Luck
---
 arch/ia64/kernel/mca.c | 41 +++++++++++++++++++----------------------
 1 file changed, 19 insertions(+), 22 deletions(-)

(limited to 'arch/ia64/kernel')

diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index 63b73f3d4c9..92367faecbb 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -1135,30 +1135,27 @@ no_mod:
 static void
 ia64_wait_for_slaves(int monarch, const char *type)
 {
-        int c, wait = 0, missing = 0;
-        for_each_online_cpu(c) {
-                if (c == monarch)
-                        continue;
-                if (ia64_mc_info.imi_rendez_checkin[c] == IA64_MCA_RENDEZ_CHECKIN_NOTDONE) {
-                        udelay(1000);           /* short wait first */
-                        wait = 1;
-                        break;
-                }
-        }
-        if (!wait)
-                goto all_in;
-        for_each_online_cpu(c) {
-                if (c == monarch)
-                        continue;
-                if (ia64_mc_info.imi_rendez_checkin[c] == IA64_MCA_RENDEZ_CHECKIN_NOTDONE) {
-                        udelay(5*1000000);      /* wait 5 seconds for slaves (arbitrary) */
-                        if (ia64_mc_info.imi_rendez_checkin[c] == IA64_MCA_RENDEZ_CHECKIN_NOTDONE)
-                                missing = 1;
-                        break;
+        int c, i , wait;
+
+        /*
+         * wait 5 seconds total for slaves (arbitrary)
+         */
+        for (i = 0; i < 5000; i++) {
+                wait = 0;
+                for_each_online_cpu(c) {
+                        if (c == monarch)
+                                continue;
+                        if (ia64_mc_info.imi_rendez_checkin[c]
+                                        == IA64_MCA_RENDEZ_CHECKIN_NOTDONE) {
+                                udelay(1000);           /* short wait */
+                                wait = 1;
+                                break;
+                        }
                 }
+                if (!wait)
+                        goto all_in;
         }
-        if (!missing)
-                goto all_in;
+
         /*
          * Maybe slave(s) dead. Print buffered messages immediately.
          */
-- cgit v1.2.3-18-g5258

From e1b1eb011e15190eb859bad0bcae67679bda7d50 Mon Sep 17 00:00:00 2001
From: Russ Anderson
Date: Wed, 19 Sep 2007 16:58:31 -0500
Subject: [IA64] Fix race when multiple cpus go through MCA

Additional testing uncovered a situation where the MCA recovery code could
hang due to a race condition.

According to the SAL spec, SAL sends a rendezvous interrupt to all but the
first CPU that goes into MCA. This includes other CPUs that go into MCA at
the same time. Those other CPUs will go into the linux MCA handler (rather
than the slave loop) with the rendezvous interrupt pending. When all the
CPUs have completed MCA processing and the last monarch completes, freeing
all the CPUs, the CPUs with the pended rendezvous interrupt then go into
the ia64_mca_rendez_int_handler(). In ia64_mca_rendez_int_handler() the
CPUs get marked as rendezvoused, but then leave the handler (due to no
MCA). That leaves the CPUs marked as rendezvoused _before_ the next MCA
event.

When the next MCA hits, the monarch will mistakenly believe that all the
CPUs are rendezvoused when they are not, opening up a window where a CPU
can get stuck in the slave loop.

This patch avoids leaving CPUs marked as rendezvoused when they are not.

Signed-off-by: Russ Anderson
Signed-off-by: Tony Luck
---
 arch/ia64/kernel/mca.c | 47 +++++++++++++++++++++++++----------------------
 1 file changed, 25 insertions(+), 22 deletions(-)

(limited to 'arch/ia64/kernel')

diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index 92367faecbb..cc87025e8f5 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -701,8 +701,7 @@ ia64_mca_cmc_vector_enable_keventd(struct work_struct *unused)
 /*
  * ia64_mca_wakeup
  *
- *      Send an inter-cpu interrupt to wake-up a particular cpu
- *      and mark that cpu to be out of rendez.
+ *      Send an inter-cpu interrupt to wake-up a particular cpu.
 *
 *      Inputs  :   cpuid
 *      Outputs :   None
@@ -711,14 +710,12 @@ static void
 ia64_mca_wakeup(int cpu)
 {
         platform_send_ipi(cpu, IA64_MCA_WAKEUP_VECTOR, IA64_IPI_DM_INT, 0);
-        ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE;
-
 }
 
 /*
  * ia64_mca_wakeup_all
  *
- *      Wakeup all the cpus which have rendez'ed previously.
+ *      Wakeup all the slave cpus which have rendez'ed previously.
  *
  *      Inputs  :   None
  *      Outputs :   None
@@ -741,7 +738,10 @@ ia64_mca_wakeup_all(void)
  *
  *      This is handler used to put slave processors into spinloop
  *      while the monarch processor does the mca handling and later
- *      wake each slave up once the monarch is done.
+ *      wake each slave up once the monarch is done. The state
+ *      IA64_MCA_RENDEZ_CHECKIN_DONE indicates the cpu is rendez'ed
+ *      in SAL. The state IA64_MCA_RENDEZ_CHECKIN_NOTDONE indicates
+ *      the cpu has come out of OS rendezvous.
  *
  *      Inputs  :   None
  *      Outputs :   None
@@ -778,6 +778,7 @@ ia64_mca_rendez_int_handler(int rendez_irq, void *arg)
                        (long)&nd, 0, 0) == NOTIFY_STOP)
                 ia64_mca_spin(__FUNCTION__);
 
+        ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE;
         /* Enable all interrupts */
         local_irq_restore(flags);
         return IRQ_HANDLED;
@@ -1221,26 +1222,27 @@ ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,
         if (notify_die(DIE_MCA_MONARCH_ENTER, "MCA", regs, (long)&nd, 0, 0)
                         == NOTIFY_STOP)
                 ia64_mca_spin(__FUNCTION__);
+
+        ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_CONCURRENT_MCA;
         if (sos->monarch) {
                 ia64_wait_for_slaves(cpu, "MCA");
+
+                /* Wakeup all the processors which are spinning in the
+                 * rendezvous loop. They will leave SAL, then spin in the OS
+                 * with interrupts disabled until this monarch cpu leaves the
+                 * MCA handler. That gets control back to the OS so we can
+                 * backtrace the other cpus, backtrace when spinning in SAL
+                 * does not work.
+                 */
+                ia64_mca_wakeup_all();
+                if (notify_die(DIE_MCA_MONARCH_PROCESS, "MCA", regs, (long)&nd, 0, 0)
+                                == NOTIFY_STOP)
+                        ia64_mca_spin(__FUNCTION__);
         } else {
-                ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_CONCURRENT_MCA;
                 while (cpu_isset(cpu, mca_cpu))
                         cpu_relax();    /* spin until monarch wakes us */
-                ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE;
         }
 
-        /* Wakeup all the processors which are spinning in the rendezvous loop.
-         * They will leave SAL, then spin in the OS with interrupts disabled
-         * until this monarch cpu leaves the MCA handler. That gets control
-         * back to the OS so we can backtrace the other cpus, backtrace when
-         * spinning in SAL does not work.
-         */
-        ia64_mca_wakeup_all();
-        if (notify_die(DIE_MCA_MONARCH_PROCESS, "MCA", regs, (long)&nd, 0, 0)
-                        == NOTIFY_STOP)
-                ia64_mca_spin(__FUNCTION__);
-
         /* Get the MCA error record and log it */
         ia64_mca_log_sal_error_record(SAL_INFO_TYPE_MCA);
@@ -1274,21 +1276,22 @@ ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,
         /* wake up the next monarch cpu,
          * and put this cpu in the rendez loop.
          */
-        ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_CONCURRENT_MCA;
         for_each_online_cpu(i) {
                 if (cpu_isset(i, mca_cpu)) {
                         monarch_cpu = i;
                         cpu_clear(i, mca_cpu); /* wake next cpu */
                         while (monarch_cpu != -1)
                                 cpu_relax();   /* spin until last cpu leaves */
-                        ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE;
                         set_curr_task(cpu, previous_current);
+                        ia64_mc_info.imi_rendez_checkin[cpu]
+                                        = IA64_MCA_RENDEZ_CHECKIN_NOTDONE;
                         return;
                 }
         }
         }
         set_curr_task(cpu, previous_current);
-        monarch_cpu = -1;
+        ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE;
+        monarch_cpu = -1;      /* This frees the slaves and previous monarchs */
 }
 
 static DECLARE_WORK(cmc_disable_work, ia64_mca_cmc_vector_disable_keventd);
-- cgit v1.2.3-18-g5258

From a62c9fe4637f62e3901f8268778dbc8100281d40 Mon Sep 17 00:00:00 2001
From: Simon Horman
Date: Wed, 19 Sep 2007 14:02:20 +0900
Subject: [IA64] Remove vector from ia64_machine_kexec()

The use of vector in ia64_machine_kexec() seems spurious, and removing
it simplifies the code slightly.

As suggested by Alex Williamson

Cc: Alex Williamson
Signed-off-by: Simon Horman
Signed-off-by: Tony Luck
---
 arch/ia64/kernel/machine_kexec.c | 6 +-----
 1 file changed, 1 insertion(+), 5 deletions(-)

(limited to 'arch/ia64/kernel')

diff --git a/arch/ia64/kernel/machine_kexec.c b/arch/ia64/kernel/machine_kexec.c
index 4f0f3b8c1ee..58e943a5d95 100644
--- a/arch/ia64/kernel/machine_kexec.c
+++ b/arch/ia64/kernel/machine_kexec.c
@@ -79,7 +79,6 @@ static void ia64_machine_kexec(struct unw_frame_info *info, void *arg)
         relocate_new_kernel_t rnk;
         void *pal_addr = efi_get_pal_addr();
         unsigned long code_addr = (unsigned long)page_address(image->control_code_page);
-        unsigned long vector;
         int ii;
 
         BUG_ON(!image);
@@ -107,11 +106,8 @@ static void ia64_machine_kexec(struct unw_frame_info *info, void *arg)
         /* unmask TPR and clear any pending interrupts */
         ia64_setreg(_IA64_REG_CR_TPR, 0);
         ia64_srlz_d();
-        vector = ia64_get_ivr();
-        while (vector != IA64_SPURIOUS_INT_VECTOR) {
+        while (ia64_get_ivr() != IA64_SPURIOUS_INT_VECTOR)
                 ia64_eoi();
-                vector = ia64_get_ivr();
-        }
         platform_kernel_launch_event();
         rnk = (relocate_new_kernel_t)&code_addr;
         (*rnk)(image->head, image->start, ia64_boot_param,
-- cgit v1.2.3-18-g5258

From ac542a513bd7905fa1a700881e0a40a94d3ed46a Mon Sep 17 00:00:00 2001
From: Kenji Kaneshige
Date: Sat, 1 Sep 2007 16:36:26 +0900
Subject: [IA64] Fix kernel panic in kdump on INIT

Fix the problem that kdump on INIT causes a kernel panic if the kdump
kernel image is not configured. The cause of this problem is that
machine_kdump_on_init() uses printk in INIT context. It should use
ia64_mca_printk() instead.

Signed-off-by: Kenji Kaneshige
Signed-off-by: Tony Luck
---
 arch/ia64/kernel/crash.c   | 2 +-
 arch/ia64/kernel/mca_drv.h | 2 --
 2 files changed, 1 insertion(+), 3 deletions(-)

(limited to 'arch/ia64/kernel')

diff --git a/arch/ia64/kernel/crash.c b/arch/ia64/kernel/crash.c
index 1d64ef478dd..b2367dd6676 100644
--- a/arch/ia64/kernel/crash.c
+++ b/arch/ia64/kernel/crash.c
@@ -119,7 +119,7 @@ static void
 machine_kdump_on_init(void)
 {
         if (!ia64_kimage) {
-                printk(KERN_NOTICE "machine_kdump_on_init(): "
+                ia64_mca_printk(KERN_NOTICE "machine_kdump_on_init(): "
                         "kdump not configured\n");
                 return;
         }
diff --git a/arch/ia64/kernel/mca_drv.h b/arch/ia64/kernel/mca_drv.h
index c85e943ba5f..485e34d0b19 100644
--- a/arch/ia64/kernel/mca_drv.h
+++ b/arch/ia64/kernel/mca_drv.h
@@ -118,7 +118,5 @@ struct mca_table_entry {
 
 extern const struct mca_table_entry *search_mca_tables (unsigned long addr);
 extern int mca_recover_range(unsigned long);
-extern void ia64_mca_printk(const char * fmt, ...)
-         __attribute__ ((format (printf, 1, 2)));
 extern void ia64_mlogbuf_dump(void);
 
-- cgit v1.2.3-18-g5258

From 2010d7fe787b39afd31daba5d5284dd432de4e8f Mon Sep 17 00:00:00 2001
From: Kenji Kaneshige
Date: Sat, 1 Sep 2007 16:37:48 +0900
Subject: [IA64] Fix kernel hangup in kdump on INIT

Fix the problem that kdump on INIT hangs if the kdump kernel image is
not configured. The kdump_init_notifier() on the monarch CPU stops its
operation at DIE_INIT_MONARCH_LEAVE time if the kdump kernel image is
not configured. On the other hand, kdump_init_notifier() on the
non-monarch CPUs keeps spinning, because those CPUs do not know that
the monarch has stopped. This is the cause of the problem.

To fix this problem, we need to check the kdump kernel image at the
top of the kdump_init_notifier() function.

Signed-off-by: Kenji Kaneshige
Signed-off-by: Tony Luck
---
 arch/ia64/kernel/crash.c | 13 ++++++++-----
 1 file changed, 8 insertions(+), 5 deletions(-)

(limited to 'arch/ia64/kernel')

diff --git a/arch/ia64/kernel/crash.c b/arch/ia64/kernel/crash.c
index b2367dd6676..f1cf2df97a2 100644
--- a/arch/ia64/kernel/crash.c
+++ b/arch/ia64/kernel/crash.c
@@ -118,11 +118,6 @@ machine_crash_shutdown(struct pt_regs *pt)
 static void
 machine_kdump_on_init(void)
 {
-        if (!ia64_kimage) {
-                ia64_mca_printk(KERN_NOTICE "machine_kdump_on_init(): "
-                        "kdump not configured\n");
-                return;
-        }
         local_irq_disable();
         kexec_disable_iosapic();
         machine_kexec(ia64_kimage);
@@ -156,6 +151,14 @@ kdump_init_notifier(struct notifier_block *self, unsigned long val, void *data)
         if (!kdump_on_init)
                 return NOTIFY_DONE;
 
+        if (!ia64_kimage) {
+                if (val == DIE_INIT_MONARCH_LEAVE)
+                        ia64_mca_printk(KERN_NOTICE
+                                        "%s: kdump not configured\n",
+                                        __FUNCTION__);
+                return NOTIFY_DONE;
+        }
+
         if (val != DIE_INIT_MONARCH_LEAVE &&
             val != DIE_INIT_SLAVE_LEAVE &&
             val != DIE_INIT_MONARCH_PROCESS &&
-- cgit v1.2.3-18-g5258
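
The ia64_wait_for_slaves() rework in the "Remove needless delay in MCA rendezvous"
patch above comes down to one idea: poll the per-cpu check-in flags once per
millisecond for at most about five seconds, instead of one short wait followed by
a single fixed five-second sleep. The following is a rough user-space sketch of
that polling pattern only; it uses POSIX threads and made-up names (slave(),
wait_for_slaves(), checked_in[]), not any kernel API, and can be built with
"cc -pthread".

/*
 * Hypothetical user-space model of the reworked rendezvous loop:
 * check every millisecond whether all "slaves" have checked in,
 * and give up only after 5000 iterations (~5 seconds total).
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

#define NSLAVES 4

static atomic_int checked_in[NSLAVES];

static void *slave(void *arg)
{
        int id = *(int *)arg;

        usleep((id + 1) * 3000);                /* each slave arrives after a few ms */
        atomic_store(&checked_in[id], 1);
        return NULL;
}

static int wait_for_slaves(void)
{
        int i, c, wait;

        for (i = 0; i < 5000; i++) {            /* up to ~5 seconds, 1 ms at a time */
                wait = 0;
                for (c = 0; c < NSLAVES; c++) {
                        if (!atomic_load(&checked_in[c])) {
                                usleep(1000);   /* short wait, then re-check */
                                wait = 1;
                                break;
                        }
                }
                if (!wait)
                        return i;               /* everyone arrived after ~i ms */
        }
        return -1;                              /* some slave never checked in */
}

int main(void)
{
        pthread_t t[NSLAVES];
        int id[NSLAVES];
        int c, ms;

        for (c = 0; c < NSLAVES; c++) {
                id[c] = c;
                pthread_create(&t[c], NULL, slave, &id[c]);
        }

        ms = wait_for_slaves();
        if (ms < 0)
                printf("rendezvous failed\n");
        else
                printf("rendezvous done after roughly %d ms\n", ms);

        for (c = 0; c < NSLAVES; c++)
                pthread_join(t[c], NULL);
        return 0;
}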