Diffstat (limited to 'drivers/xen/events')
-rw-r--r--   drivers/xen/events/events_2l.c        |  15
-rw-r--r--   drivers/xen/events/events_base.c      | 119
-rw-r--r--   drivers/xen/events/events_fifo.c      |  49
-rw-r--r--   drivers/xen/events/events_internal.h  |   1
4 files changed, 84 insertions, 100 deletions
diff --git a/drivers/xen/events/events_2l.c b/drivers/xen/events/events_2l.c
index d7ff9175730..5db43fc100a 100644
--- a/drivers/xen/events/events_2l.c
+++ b/drivers/xen/events/events_2l.c
@@ -166,7 +166,6 @@ static void evtchn_2l_handle_events(unsigned cpu)
 	int start_word_idx, start_bit_idx;
 	int word_idx, bit_idx;
 	int i;
-	struct irq_desc *desc;
 	struct shared_info *s = HYPERVISOR_shared_info;
 	struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu);
 
@@ -176,11 +175,8 @@ static void evtchn_2l_handle_events(unsigned cpu)
 		unsigned int evtchn = evtchn_from_irq(irq);
 		word_idx = evtchn / BITS_PER_LONG;
 		bit_idx = evtchn % BITS_PER_LONG;
-		if (active_evtchns(cpu, s, word_idx) & (1ULL << bit_idx)) {
-			desc = irq_to_desc(irq);
-			if (desc)
-				generic_handle_irq_desc(irq, desc);
-		}
+		if (active_evtchns(cpu, s, word_idx) & (1ULL << bit_idx))
+			generic_handle_irq(irq);
 	}
 
 	/*
@@ -245,11 +241,8 @@ static void evtchn_2l_handle_events(unsigned cpu)
 			port = (word_idx * BITS_PER_EVTCHN_WORD) + bit_idx;
 			irq = get_evtchn_to_irq(port);
 
-			if (irq != -1) {
-				desc = irq_to_desc(irq);
-				if (desc)
-					generic_handle_irq_desc(irq, desc);
-			}
+			if (irq != -1)
+				generic_handle_irq(irq);
 
 			bit_idx = (bit_idx + 1) % BITS_PER_EVTCHN_WORD;
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
index f4a9e331129..c919d3d5c84 100644
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -336,9 +336,8 @@ static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
 
 	BUG_ON(irq == -1);
 #ifdef CONFIG_SMP
-	cpumask_copy(irq_to_desc(irq)->irq_data.affinity, cpumask_of(cpu));
+	cpumask_copy(irq_get_irq_data(irq)->affinity, cpumask_of(cpu));
 #endif
-
 	xen_evtchn_port_bind_to_cpu(info, cpu);
 
 	info->cpu = cpu;
@@ -373,10 +372,8 @@ static void xen_irq_init(unsigned irq)
 {
 	struct irq_info *info;
 #ifdef CONFIG_SMP
-	struct irq_desc *desc = irq_to_desc(irq);
-
 	/* By default all event channels notify CPU#0. */
-	cpumask_copy(desc->irq_data.affinity, cpumask_of(0));
+	cpumask_copy(irq_get_irq_data(irq)->affinity, cpumask_of(0));
 #endif
 
 	info = kzalloc(sizeof(*info), GFP_KERNEL);
@@ -391,29 +388,22 @@ static void xen_irq_init(unsigned irq)
 	list_add_tail(&info->list, &xen_irq_list_head);
 }
 
-static int __must_check xen_allocate_irq_dynamic(void)
+static int __must_check xen_allocate_irqs_dynamic(int nvec)
 {
-	int first = 0;
-	int irq;
+	int i, irq = irq_alloc_descs(-1, 0, nvec, -1);
 
-#ifdef CONFIG_X86_IO_APIC
-	/*
-	 * For an HVM guest or domain 0 which see "real" (emulated or
-	 * actual respectively) GSIs we allocate dynamic IRQs
-	 * e.g. those corresponding to event channels or MSIs
-	 * etc. from the range above those "real" GSIs to avoid
-	 * collisions.
-	 */
-	if (xen_initial_domain() || xen_hvm_domain())
-		first = get_nr_irqs_gsi();
-#endif
+	if (irq >= 0) {
+		for (i = 0; i < nvec; i++)
+			xen_irq_init(irq + i);
+	}
 
-	irq = irq_alloc_desc_from(first, -1);
+	return irq;
+}
 
-	if (irq >= 0)
-		xen_irq_init(irq);
+static inline int __must_check xen_allocate_irq_dynamic(void)
+{
 
-	return irq;
+	return xen_allocate_irqs_dynamic(1);
 }
 
 static int __must_check xen_allocate_irq_gsi(unsigned gsi)
@@ -469,9 +459,6 @@ static void xen_evtchn_close(unsigned int port)
 	close.port = port;
 	if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
 		BUG();
-
-	/* Closed ports are implicitly re-bound to VCPU0. */
-	bind_evtchn_to_cpu(port, 0);
 }
 
 static void pirq_query_unmask(int irq)
@@ -490,13 +477,6 @@ static void pirq_query_unmask(int irq)
 		info->u.pirq.flags |= PIRQ_NEEDS_EOI;
 }
 
-static bool probing_irq(int irq)
-{
-	struct irq_desc *desc = irq_to_desc(irq);
-
-	return desc && desc->action == NULL;
-}
-
 static void eoi_pirq(struct irq_data *data)
 {
 	int evtchn = evtchn_from_irq(data->irq);
@@ -538,8 +518,7 @@ static unsigned int __startup_pirq(unsigned int irq)
 					BIND_PIRQ__WILL_SHARE : 0;
 	rc = HYPERVISOR_event_channel_op(EVTCHNOP_bind_pirq, &bind_pirq);
 	if (rc != 0) {
-		if (!probing_irq(irq))
-			pr_info("Failed to obtain physical IRQ %d\n", irq);
+		pr_warn("Failed to obtain physical IRQ %d\n", irq);
 		return 0;
 	}
 	evtchn = bind_pirq.port;
@@ -741,22 +720,25 @@ int xen_allocate_pirq_msi(struct pci_dev *dev, struct msi_desc *msidesc)
 }
 
 int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc,
-			     int pirq, const char *name, domid_t domid)
+			     int pirq, int nvec, const char *name, domid_t domid)
 {
-	int irq, ret;
+	int i, irq, ret;
 
 	mutex_lock(&irq_mapping_update_lock);
 
-	irq = xen_allocate_irq_dynamic();
+	irq = xen_allocate_irqs_dynamic(nvec);
 	if (irq < 0)
 		goto out;
 
-	irq_set_chip_and_handler_name(irq, &xen_pirq_chip, handle_edge_irq,
-			name);
+	for (i = 0; i < nvec; i++) {
+		irq_set_chip_and_handler_name(irq + i, &xen_pirq_chip, handle_edge_irq, name);
+
+		ret = xen_irq_info_pirq_setup(irq + i, 0, pirq + i, 0, domid,
+					      i == 0 ? 0 : PIRQ_MSI_GROUP);
+		if (ret < 0)
+			goto error_irq;
+	}
 
-	ret = xen_irq_info_pirq_setup(irq, 0, pirq, 0, domid, 0);
-	if (ret < 0)
-		goto error_irq;
 	ret = irq_set_msi_desc(irq, msidesc);
 	if (ret < 0)
 		goto error_irq;
@@ -764,7 +746,8 @@ out:
 	mutex_unlock(&irq_mapping_update_lock);
 	return irq;
 error_irq:
-	__unbind_from_irq(irq);
+	for (; i >= 0; i--)
+		__unbind_from_irq(irq + i);
 	mutex_unlock(&irq_mapping_update_lock);
 	return ret;
 }
@@ -772,18 +755,18 @@ error_irq:
 
 int xen_destroy_irq(int irq)
 {
-	struct irq_desc *desc;
 	struct physdev_unmap_pirq unmap_irq;
 	struct irq_info *info = info_for_irq(irq);
 	int rc = -ENOENT;
 
 	mutex_lock(&irq_mapping_update_lock);
 
-	desc = irq_to_desc(irq);
-	if (!desc)
-		goto out;
-
-	if (xen_initial_domain()) {
+	/*
+	 * If trying to remove a vector in a MSI group different
+	 * than the first one skip the PIRQ unmap unless this vector
+	 * is the first one in the group.
+	 */
+	if (xen_initial_domain() && !(info->u.pirq.flags & PIRQ_MSI_GROUP)) {
 		unmap_irq.pirq = info->u.pirq.pirq;
 		unmap_irq.domid = info->u.pirq.domid;
 		rc = HYPERVISOR_physdev_op(PHYSDEVOP_unmap_pirq, &unmap_irq);
@@ -1250,6 +1233,7 @@ void xen_evtchn_do_upcall(struct pt_regs *regs)
 	irq_enter();
 #ifdef CONFIG_X86
 	exit_idle();
+	inc_irq_stat(irq_hv_callback_count);
 #endif
 
 	__xen_evtchn_do_upcall();
@@ -1339,31 +1323,11 @@ static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
 static int set_affinity_irq(struct irq_data *data, const struct cpumask *dest,
 			    bool force)
 {
-	unsigned tcpu = cpumask_first(dest);
+	unsigned tcpu = cpumask_first_and(dest, cpu_online_mask);
 
 	return rebind_irq_to_cpu(data->irq, tcpu);
 }
 
-static int retrigger_evtchn(int evtchn)
-{
-	int masked;
-
-	if (!VALID_EVTCHN(evtchn))
-		return 0;
-
-	masked = test_and_set_mask(evtchn);
-	set_evtchn(evtchn);
-	if (!masked)
-		unmask_evtchn(evtchn);
-
-	return 1;
-}
-
-int resend_irq_on_evtchn(unsigned int irq)
-{
-	return retrigger_evtchn(evtchn_from_irq(irq));
-}
-
 static void enable_dynirq(struct irq_data *data)
 {
 	int evtchn = evtchn_from_irq(data->irq);
@@ -1398,7 +1362,18 @@ static void mask_ack_dynirq(struct irq_data *data)
 
 static int retrigger_dynirq(struct irq_data *data)
 {
-	return retrigger_evtchn(evtchn_from_irq(data->irq));
+	unsigned int evtchn = evtchn_from_irq(data->irq);
+	int masked;
+
+	if (!VALID_EVTCHN(evtchn))
+		return 0;
+
+	masked = test_and_set_mask(evtchn);
+	set_evtchn(evtchn);
+	if (!masked)
+		unmask_evtchn(evtchn);
+
+	return 1;
 }
 
 static void restore_pirqs(void)
diff --git a/drivers/xen/events/events_fifo.c b/drivers/xen/events/events_fifo.c
index 1de2a191b39..84b4bfb8434 100644
--- a/drivers/xen/events/events_fifo.c
+++ b/drivers/xen/events/events_fifo.c
@@ -66,7 +66,22 @@ static DEFINE_PER_CPU(struct evtchn_fifo_queue, cpu_queue);
 static event_word_t *event_array[MAX_EVENT_ARRAY_PAGES] __read_mostly;
 static unsigned event_array_pages __read_mostly;
 
+/*
+ * sync_set_bit() and friends must be unsigned long aligned on non-x86
+ * platforms.
+ */
+#if !defined(CONFIG_X86) && BITS_PER_LONG > 32
+
+#define BM(w) (unsigned long *)((unsigned long)w & ~0x7UL)
+#define EVTCHN_FIFO_BIT(b, w) \
+	(((unsigned long)w & 0x4UL) ? (EVTCHN_FIFO_ ##b + 32) : EVTCHN_FIFO_ ##b)
+
+#else
+
 #define BM(w) ((unsigned long *)(w))
+#define EVTCHN_FIFO_BIT(b, w) EVTCHN_FIFO_ ##b
+
+#endif
 
 static inline event_word_t *event_word_from_port(unsigned port)
 {
@@ -161,33 +176,38 @@ static void evtchn_fifo_bind_to_cpu(struct irq_info *info, unsigned cpu)
 static void evtchn_fifo_clear_pending(unsigned port)
 {
 	event_word_t *word = event_word_from_port(port);
-	sync_clear_bit(EVTCHN_FIFO_PENDING, BM(word));
+	sync_clear_bit(EVTCHN_FIFO_BIT(PENDING, word), BM(word));
 }
 
 static void evtchn_fifo_set_pending(unsigned port)
 {
 	event_word_t *word = event_word_from_port(port);
-	sync_set_bit(EVTCHN_FIFO_PENDING, BM(word));
+	sync_set_bit(EVTCHN_FIFO_BIT(PENDING, word), BM(word));
 }
 
 static bool evtchn_fifo_is_pending(unsigned port)
 {
 	event_word_t *word = event_word_from_port(port);
-	return sync_test_bit(EVTCHN_FIFO_PENDING, BM(word));
+	return sync_test_bit(EVTCHN_FIFO_BIT(PENDING, word), BM(word));
 }
 
 static bool evtchn_fifo_test_and_set_mask(unsigned port)
 {
 	event_word_t *word = event_word_from_port(port);
-	return sync_test_and_set_bit(EVTCHN_FIFO_MASKED, BM(word));
+	return sync_test_and_set_bit(EVTCHN_FIFO_BIT(MASKED, word), BM(word));
 }
 
 static void evtchn_fifo_mask(unsigned port)
 {
 	event_word_t *word = event_word_from_port(port);
-	sync_set_bit(EVTCHN_FIFO_MASKED, BM(word));
+	sync_set_bit(EVTCHN_FIFO_BIT(MASKED, word), BM(word));
 }
 
+static bool evtchn_fifo_is_masked(unsigned port)
+{
+	event_word_t *word = event_word_from_port(port);
+	return sync_test_bit(EVTCHN_FIFO_BIT(MASKED, word), BM(word));
+}
 /*
  * Clear MASKED, spinning if BUSY is set.
  */
@@ -211,7 +231,7 @@ static void evtchn_fifo_unmask(unsigned port)
 	BUG_ON(!irqs_disabled());
 
 	clear_masked(word);
-	if (sync_test_bit(EVTCHN_FIFO_PENDING, BM(word))) {
+	if (evtchn_fifo_is_pending(port)) {
 		struct evtchn_unmask unmask = { .port = port };
 		(void)HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &unmask);
 	}
@@ -235,19 +255,15 @@ static uint32_t clear_linked(volatile event_word_t *word)
 static void handle_irq_for_port(unsigned port)
 {
 	int irq;
-	struct irq_desc *desc;
 
 	irq = get_evtchn_to_irq(port);
-	if (irq != -1) {
-		desc = irq_to_desc(irq);
-		if (desc)
-			generic_handle_irq_desc(irq, desc);
-	}
+	if (irq != -1)
+		generic_handle_irq(irq);
 }
 
 static void consume_one_event(unsigned cpu,
 			      struct evtchn_fifo_control_block *control_block,
-			      unsigned priority, uint32_t *ready)
+			      unsigned priority, unsigned long *ready)
 {
 	struct evtchn_fifo_queue *q = &per_cpu(cpu_queue, cpu);
 	uint32_t head;
@@ -277,10 +293,9 @@ static void consume_one_event(unsigned cpu,
 	 * copy of the ready word.
 	 */
 	if (head == 0)
-		clear_bit(priority, BM(ready));
+		clear_bit(priority, ready);
 
-	if (sync_test_bit(EVTCHN_FIFO_PENDING, BM(word))
-	    && !sync_test_bit(EVTCHN_FIFO_MASKED, BM(word)))
+	if (evtchn_fifo_is_pending(port) && !evtchn_fifo_is_masked(port))
 		handle_irq_for_port(port);
 
 	q->head[priority] = head;
@@ -289,7 +304,7 @@ static void consume_one_event(unsigned cpu,
 static void evtchn_fifo_handle_events(unsigned cpu)
 {
 	struct evtchn_fifo_control_block *control_block;
-	uint32_t ready;
+	unsigned long ready;
 	unsigned q;
 
 	control_block = per_cpu(cpu_control_block, cpu);
diff --git a/drivers/xen/events/events_internal.h b/drivers/xen/events/events_internal.h
index 677f41a0fff..50c2050a1e3 100644
--- a/drivers/xen/events/events_internal.h
+++ b/drivers/xen/events/events_internal.h
@@ -53,6 +53,7 @@ struct irq_info {
 
 #define PIRQ_NEEDS_EOI	(1 << 0)
 #define PIRQ_SHAREABLE	(1 << 1)
+#define PIRQ_MSI_GROUP	(1 << 2)
 
 struct evtchn_ops {
 	unsigned (*max_channels)(void);
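
The events_fifo.c hunk above exists because sync_set_bit() and friends must operate on an aligned unsigned long on non-x86 platforms, so each 32-bit event word is addressed as a bit offset inside the enclosing 8-byte-aligned long. A stand-alone sketch of how the two macros map adjacent event words onto one long (ordinary user-space C for a 64-bit host, not kernel code; the EVTCHN_FIFO_PENDING bit number is quoted from the Xen FIFO event channel ABI, everything else is illustrative):

/*
 * Illustration of the BM()/EVTCHN_FIFO_BIT() macros added to events_fifo.c
 * for 64-bit non-x86 platforms.  Assumes a 64-bit host; not kernel code.
 */
#include <stdint.h>
#include <stdio.h>

typedef uint32_t event_word_t;

#define EVTCHN_FIFO_PENDING 31	/* bit number from the FIFO event channel ABI */

/* Round the 32-bit word's address down to the containing 8-byte long. */
#define BM(w) ((unsigned long *)((unsigned long)(w) & ~0x7UL))
/* If the word is the upper half of that long, shift the bit number by 32. */
#define EVTCHN_FIFO_BIT(b, w) \
	(((unsigned long)(w) & 0x4UL) ? (EVTCHN_FIFO_##b + 32) : EVTCHN_FIFO_##b)

int main(void)
{
	/* Two adjacent event words sharing one 8-byte-aligned unsigned long. */
	static event_word_t words[2] __attribute__((aligned(8)));
	int i;

	for (i = 0; i < 2; i++) {
		event_word_t *w = &words[i];
		printf("word %d: long at %p, PENDING is bit %d of that long\n",
		       i, (void *)BM(w), EVTCHN_FIFO_BIT(PENDING, w));
	}
	return 0;	/* word 0 -> bit 31, word 1 -> bit 63 of the same long */
}

Both words resolve to the same unsigned long via BM(), while the bit number is shifted by 32 for the upper word, which is exactly what lets the sync_*_bit() helpers stay aligned.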
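The error path in the reworked xen_bind_pirq_msi_to_irq() relies on i still holding the index of the vector whose setup failed, so "for (; i >= 0; i--)" unwinds that partially initialised vector together with every vector set up before it. A minimal sketch of the same pattern, with hypothetical setup_vector()/teardown_vector() standing in for xen_irq_info_pirq_setup() and __unbind_from_irq():

/* User-space sketch of the unwind pattern; not kernel code. */
#include <stdio.h>

#define NVEC 4

static int setup_vector(int i)
{
	/* Pretend vector 2 fails so the unwind path is exercised. */
	return (i == 2) ? -1 : 0;
}

static void teardown_vector(int i)
{
	printf("teardown vector %d\n", i);
}

int main(void)
{
	int i;

	for (i = 0; i < NVEC; i++) {
		printf("setup vector %d\n", i);
		if (setup_vector(i) < 0)
			goto error;
	}
	return 0;

error:
	/* Unwind the failing vector and every earlier one, newest first. */
	for (; i >= 0; i--)
		teardown_vector(i);
	return 1;
}

Running this tears down vectors 2, 1, 0 in that order, mirroring how the kernel code releases the IRQs it had already set up when vector setup fails partway through the group.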
