Diffstat (limited to 'arch/ia64/sn/kernel/irq.c')
| -rw-r--r-- | arch/ia64/sn/kernel/irq.c | 228 |
1 files changed, 136 insertions, 92 deletions
diff --git a/arch/ia64/sn/kernel/irq.c b/arch/ia64/sn/kernel/irq.c
index 7bb6ad188ba..85d09515490 100644
--- a/arch/ia64/sn/kernel/irq.c
+++ b/arch/ia64/sn/kernel/irq.c
@@ -5,12 +5,14 @@
  * License. See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * Copyright (c) 2000-2006 Silicon Graphics, Inc. All Rights Reserved.
+ * Copyright (c) 2000-2008 Silicon Graphics, Inc. All Rights Reserved.
  */
 
 #include <linux/irq.h>
 #include <linux/spinlock.h>
 #include <linux/init.h>
+#include <linux/rculist.h>
+#include <linux/slab.h>
 #include <asm/sn/addrs.h>
 #include <asm/sn/arch.h>
 #include <asm/sn/intr.h>
@@ -19,12 +21,11 @@
 #include <asm/sn/pcidev.h>
 #include <asm/sn/shub_mmr.h>
 #include <asm/sn/sn_sal.h>
+#include <asm/sn/sn_feature_sets.h>
 
-static void force_interrupt(int irq);
 static void register_intr_pda(struct sn_irq_info *sn_irq_info);
 static void unregister_intr_pda(struct sn_irq_info *sn_irq_info);
 
-int sn_force_interrupt_flag = 1;
 extern int sn_ioif_inited;
 struct list_head **sn_irq_lh;
 static DEFINE_SPINLOCK(sn_irq_info_lock); /* non-IRQ lock */
@@ -59,80 +60,74 @@ void sn_intr_free(nasid_t local_nasid, int local_widget,
 			(u64) sn_irq_info->irq_cookie, 0, 0);
 }
 
-static unsigned int sn_startup_irq(unsigned int irq)
+u64 sn_intr_redirect(nasid_t local_nasid, int local_widget,
+		     struct sn_irq_info *sn_irq_info,
+		     nasid_t req_nasid, int req_slice)
+{
+	struct ia64_sal_retval ret_stuff;
+	ret_stuff.status = 0;
+	ret_stuff.v0 = 0;
+
+	SAL_CALL_NOLOCK(ret_stuff, (u64) SN_SAL_IOIF_INTERRUPT,
+			(u64) SAL_INTR_REDIRECT, (u64) local_nasid,
+			(u64) local_widget, __pa(sn_irq_info),
+			(u64) req_nasid, (u64) req_slice, 0);
+
+	return ret_stuff.status;
+}
+
+static unsigned int sn_startup_irq(struct irq_data *data)
 {
 	return 0;
 }
 
-static void sn_shutdown_irq(unsigned int irq)
+static void sn_shutdown_irq(struct irq_data *data)
 {
 }
 
-static void sn_disable_irq(unsigned int irq)
+extern void ia64_mca_register_cpev(int);
+
+static void sn_disable_irq(struct irq_data *data)
 {
+	if (data->irq == local_vector_to_irq(IA64_CPE_VECTOR))
+		ia64_mca_register_cpev(0);
 }
 
-static void sn_enable_irq(unsigned int irq)
+static void sn_enable_irq(struct irq_data *data)
 {
+	if (data->irq == local_vector_to_irq(IA64_CPE_VECTOR))
+		ia64_mca_register_cpev(data->irq);
 }
 
-static void sn_ack_irq(unsigned int irq)
+static void sn_ack_irq(struct irq_data *data)
 {
 	u64 event_occurred, mask;
+	unsigned int irq = data->irq & 0xff;
 
-	irq = irq & 0xff;
 	event_occurred = HUB_L((u64*)LOCAL_MMR_ADDR(SH_EVENT_OCCURRED));
 	mask = event_occurred & SH_ALL_INT_MASK;
 	HUB_S((u64*)LOCAL_MMR_ADDR(SH_EVENT_OCCURRED_ALIAS), mask);
 	__set_bit(irq, (volatile void *)pda->sn_in_service_ivecs);
 
-	move_native_irq(irq);
+	irq_move_irq(data);
 }
 
-static void sn_end_irq(unsigned int irq)
-{
-	int ivec;
-	u64 event_occurred;
-
-	ivec = irq & 0xff;
-	if (ivec == SGI_UART_VECTOR) {
-		event_occurred = HUB_L((u64*)LOCAL_MMR_ADDR (SH_EVENT_OCCURRED));
-		/* If the UART bit is set here, we may have received an
-		 * interrupt from the UART that the driver missed. To
-		 * make sure, we IPI ourselves to force us to look again.
-		 */
-		if (event_occurred & SH_EVENT_OCCURRED_UART_INT_MASK) {
-			platform_send_ipi(smp_processor_id(), SGI_UART_VECTOR,
-					  IA64_IPI_DM_INT, 0);
-		}
-	}
-	__clear_bit(ivec, (volatile void *)pda->sn_in_service_ivecs);
-	if (sn_force_interrupt_flag)
-		force_interrupt(irq);
-}
-
-static void sn_irq_info_free(struct rcu_head *head);
-
 struct sn_irq_info *sn_retarget_vector(struct sn_irq_info *sn_irq_info,
 				       nasid_t nasid, int slice)
 {
 	int vector;
+	int cpuid;
+#ifdef CONFIG_SMP
 	int cpuphys;
+#endif
 	int64_t bridge;
 	int local_widget, status;
 	nasid_t local_nasid;
 	struct sn_irq_info *new_irq_info;
 	struct sn_pcibus_provider *pci_provider;
 
-	new_irq_info = kmalloc(sizeof(struct sn_irq_info), GFP_ATOMIC);
-	if (new_irq_info == NULL)
-		return NULL;
-
-	memcpy(new_irq_info, sn_irq_info, sizeof(struct sn_irq_info));
-
-	bridge = (u64) new_irq_info->irq_bridge;
+	bridge = (u64) sn_irq_info->irq_bridge;
 	if (!bridge) {
-		kfree(new_irq_info);
 		return NULL; /* irq is not a device interrupt */
 	}
 
@@ -142,11 +137,26 @@ struct sn_irq_info *sn_retarget_vector(struct sn_irq_info *sn_irq_info,
 		local_widget = TIO_SWIN_WIDGETNUM(bridge);
 	else
 		local_widget = SWIN_WIDGETNUM(bridge);
-
 	vector = sn_irq_info->irq_irq;
+
+	/* Make use of SAL_INTR_REDIRECT if PROM supports it */
+	status = sn_intr_redirect(local_nasid, local_widget, sn_irq_info, nasid, slice);
+	if (!status) {
+		new_irq_info = sn_irq_info;
+		goto finish_up;
+	}
+
+	/*
+	 * PROM does not support SAL_INTR_REDIRECT, or it failed.
+	 * Revert to old method.
+	 */
+	new_irq_info = kmemdup(sn_irq_info, sizeof(struct sn_irq_info),
+			       GFP_ATOMIC);
+	if (new_irq_info == NULL)
+		return NULL;
+
 	/* Free the old PROM new_irq_info structure */
 	sn_intr_free(local_nasid, local_widget, new_irq_info);
-	/* Update kernels new_irq_info with new target info */
 	unregister_intr_pda(new_irq_info);
 
 	/* allocate a new PROM new_irq_info struct */
@@ -160,9 +170,18 @@ struct sn_irq_info *sn_retarget_vector(struct sn_irq_info *sn_irq_info,
 		return NULL;
 	}
 
-	cpuphys = nasid_slice_to_cpuid(nasid, slice);
-	new_irq_info->irq_cpuid = cpuphys;
 	register_intr_pda(new_irq_info);
+	spin_lock(&sn_irq_info_lock);
+	list_replace_rcu(&sn_irq_info->list, &new_irq_info->list);
+	spin_unlock(&sn_irq_info_lock);
+	kfree_rcu(sn_irq_info, rcu);
+
+
+finish_up:
+	/* Update kernels new_irq_info with new target info */
+	cpuid = nasid_slice_to_cpuid(new_irq_info->irq_nasid,
+				     new_irq_info->irq_slice);
+	new_irq_info->irq_cpuid = cpuid;
 
 	pci_provider = sn_pci_provider[new_irq_info->irq_bridge_type];
 
@@ -174,43 +193,75 @@ struct sn_irq_info *sn_retarget_vector(struct sn_irq_info *sn_irq_info,
 	    pci_provider && pci_provider->target_interrupt)
 		(pci_provider->target_interrupt)(new_irq_info);
 
-	spin_lock(&sn_irq_info_lock);
-	list_replace_rcu(&sn_irq_info->list, &new_irq_info->list);
-	spin_unlock(&sn_irq_info_lock);
-	call_rcu(&sn_irq_info->rcu, sn_irq_info_free);
-
 #ifdef CONFIG_SMP
+	cpuphys = cpu_physical_id(cpuid);
 	set_irq_affinity_info((vector & 0xff), cpuphys, 0);
 #endif
 
 	return new_irq_info;
 }
 
-static void sn_set_affinity_irq(unsigned int irq, cpumask_t mask)
+static int sn_set_affinity_irq(struct irq_data *data,
+			       const struct cpumask *mask, bool force)
 {
 	struct sn_irq_info *sn_irq_info, *sn_irq_info_safe;
+	unsigned int irq = data->irq;
 	nasid_t nasid;
 	int slice;
 
-	nasid = cpuid_to_nasid(first_cpu(mask));
-	slice = cpuid_to_slice(first_cpu(mask));
+	nasid = cpuid_to_nasid(cpumask_first_and(mask, cpu_online_mask));
+	slice = cpuid_to_slice(cpumask_first_and(mask, cpu_online_mask));
 
 	list_for_each_entry_safe(sn_irq_info, sn_irq_info_safe,
 				 sn_irq_lh[irq], list)
 		(void)sn_retarget_vector(sn_irq_info, nasid, slice);
+
+	return 0;
 }
 
-struct hw_interrupt_type irq_type_sn = {
-	.typename	= "SN hub",
-	.startup	= sn_startup_irq,
-	.shutdown	= sn_shutdown_irq,
-	.enable		= sn_enable_irq,
-	.disable	= sn_disable_irq,
-	.ack		= sn_ack_irq,
-	.end		= sn_end_irq,
-	.set_affinity	= sn_set_affinity_irq
+#ifdef CONFIG_SMP
+void sn_set_err_irq_affinity(unsigned int irq)
+{
+	/*
+	 * On systems which support CPU disabling (SHub2), all error interrupts
+	 * are targeted at the boot CPU.
+	 */
+	if (is_shub2() && sn_prom_feature_available(PRF_CPU_DISABLE_SUPPORT))
+		set_irq_affinity_info(irq, cpu_physical_id(0), 0);
+}
+#else
+void sn_set_err_irq_affinity(unsigned int irq) { }
+#endif
+
+static void
+sn_mask_irq(struct irq_data *data)
+{
+}
+
+static void
+sn_unmask_irq(struct irq_data *data)
+{
+}
+
+struct irq_chip irq_type_sn = {
+	.name			= "SN hub",
+	.irq_startup		= sn_startup_irq,
+	.irq_shutdown		= sn_shutdown_irq,
+	.irq_enable		= sn_enable_irq,
+	.irq_disable		= sn_disable_irq,
+	.irq_ack		= sn_ack_irq,
+	.irq_mask		= sn_mask_irq,
+	.irq_unmask		= sn_unmask_irq,
+	.irq_set_affinity	= sn_set_affinity_irq
 };
 
+ia64_vector sn_irq_to_vector(int irq)
+{
+	if (irq >= IA64_NUM_VECTORS)
+		return 0;
+	return (ia64_vector)irq;
+}
+
 unsigned int sn_local_vector_to_irq(u8 vector)
 {
 	return (CPU_VECTOR_TO_IRQ(smp_processor_id(), vector));
@@ -219,15 +270,13 @@ unsigned int sn_local_vector_to_irq(u8 vector)
 void sn_irq_init(void)
 {
 	int i;
-	irq_desc_t *base_desc = irq_desc;
 
 	ia64_first_device_vector = IA64_SN2_FIRST_DEVICE_VECTOR;
 	ia64_last_device_vector = IA64_SN2_LAST_DEVICE_VECTOR;
 
 	for (i = 0; i < NR_IRQS; i++) {
-		if (base_desc[i].chip == &no_irq_type) {
-			base_desc[i].chip = &irq_type_sn;
-		}
+		if (irq_get_chip(i) == &no_irq_chip)
+			irq_set_chip(i, &irq_type_sn);
 	}
 }
 
@@ -286,19 +335,14 @@ static void unregister_intr_pda(struct sn_irq_info *sn_irq_info)
 	rcu_read_unlock();
 }
 
-static void sn_irq_info_free(struct rcu_head *head)
-{
-	struct sn_irq_info *sn_irq_info;
-
-	sn_irq_info = container_of(head, struct sn_irq_info, rcu);
-	kfree(sn_irq_info);
-}
-
 void sn_irq_fixup(struct pci_dev *pci_dev, struct sn_irq_info *sn_irq_info)
 {
 	nasid_t nasid = sn_irq_info->irq_nasid;
 	int slice = sn_irq_info->irq_slice;
 	int cpu = nasid_slice_to_cpuid(nasid, slice);
+#ifdef CONFIG_SMP
+	int cpuphys;
+#endif
 
 	pci_dev_get(pci_dev);
 	sn_irq_info->irq_cpuid = cpu;
@@ -308,9 +352,20 @@ void sn_irq_fixup(struct pci_dev *pci_dev, struct sn_irq_info *sn_irq_info)
 	spin_lock(&sn_irq_info_lock);
 	list_add_rcu(&sn_irq_info->list, sn_irq_lh[sn_irq_info->irq_irq]);
 	reserve_irq_vector(sn_irq_info->irq_irq);
+	if (sn_irq_info->irq_int_bit != -1)
+		irq_set_handler(sn_irq_info->irq_irq, handle_level_irq);
 	spin_unlock(&sn_irq_info_lock);
 
 	register_intr_pda(sn_irq_info);
+#ifdef CONFIG_SMP
+	cpuphys = cpu_physical_id(cpu);
+	set_irq_affinity_info(sn_irq_info->irq_irq, cpuphys, 0);
+	/*
+	 * Affinity was set by the PROM, prevent it from
+	 * being reset by the request_irq() path.
+	 */
+	irqd_mark_affinity_was_set(irq_get_irq_data(sn_irq_info->irq_irq));
+#endif
 }
 
 void sn_irq_unfixup(struct pci_dev *pci_dev)
@@ -335,7 +390,7 @@ void sn_irq_unfixup(struct pci_dev *pci_dev)
 	spin_unlock(&sn_irq_info_lock);
 	if (list_empty(sn_irq_lh[sn_irq_info->irq_irq]))
 		free_irq_vector(sn_irq_info->irq_irq);
-	call_rcu(&sn_irq_info->rcu, sn_irq_info_free);
+	kfree_rcu(sn_irq_info, rcu);
 	pci_dev_put(pci_dev);
 }
 
@@ -346,22 +401,11 @@ sn_call_force_intr_provider(struct sn_irq_info *sn_irq_info)
 	struct sn_pcibus_provider *pci_provider;
 
 	pci_provider = sn_pci_provider[sn_irq_info->irq_bridge_type];
-	if (pci_provider && pci_provider->force_interrupt)
-		(*pci_provider->force_interrupt)(sn_irq_info);
-}
-
-static void force_interrupt(int irq)
-{
-	struct sn_irq_info *sn_irq_info;
-
-	if (!sn_ioif_inited)
-		return;
-
-	rcu_read_lock();
-	list_for_each_entry_rcu(sn_irq_info, sn_irq_lh[irq], list)
-		sn_call_force_intr_provider(sn_irq_info);
-	rcu_read_unlock();
+	/* Don't force an interrupt if the irq has been disabled */
+	if (!irqd_irq_disabled(irq_get_irq_data(sn_irq_info->irq_irq)) &&
+	    pci_provider && pci_provider->force_interrupt)
+		(*pci_provider->force_interrupt)(sn_irq_info);
 }
 
 /*
@@ -382,7 +426,7 @@ static void sn_check_intr(int irq, struct sn_irq_info *sn_irq_info)
 	/*
 	 * Bridge types attached to TIO (anything but PIC) do not need this WAR
 	 * since they do not target Shub II interrupt registers. If that
-	 * ever changes, this check needs to accomodate.
+	 * ever changes, this check needs to accommodate.
 	 */
 	if (sn_irq_info->irq_bridge_type != PCIIO_ASIC_TYPE_PIC)
 		return;
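Note: several hunks above drop the sn_irq_info_free() RCU callback and free sn_irq_info with kfree_rcu() instead. A minimal sketch of that pattern, using made-up names (struct foo, foo_rcu_free, foo_release_*) rather than anything from this file:

	/* Illustrative only; not part of arch/ia64/sn/kernel/irq.c. */
	#include <linux/rcupdate.h>
	#include <linux/slab.h>

	struct foo {
		int data;
		struct rcu_head rcu;	/* named field passed to kfree_rcu() */
	};

	/* old style: call_rcu() with a callback whose only job is kfree() */
	static void foo_rcu_free(struct rcu_head *head)
	{
		kfree(container_of(head, struct foo, rcu));
	}

	static void foo_release_old(struct foo *f)
	{
		call_rcu(&f->rcu, foo_rcu_free);
	}

	/* new style: the grace period and the kfree() are handled for you */
	static void foo_release_new(struct foo *f)
	{
		kfree_rcu(f, rcu);
	}

kfree_rcu() takes the object pointer and the name of its struct rcu_head member, which is why the callback and its container_of() boilerplate can be deleted outright in this diff.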
