Diffstat (limited to 'kernel/irq')
-rw-r--r--   kernel/irq/Kconfig      |  10
-rw-r--r--   kernel/irq/chip.c       |  55
-rw-r--r--   kernel/irq/devres.c     |  45
-rw-r--r--   kernel/irq/handle.c     |   5
-rw-r--r--   kernel/irq/internals.h  |  17
-rw-r--r--   kernel/irq/irqdesc.c    | 102
-rw-r--r--   kernel/irq/irqdomain.c  |  20
-rw-r--r--   kernel/irq/manage.c     | 157
-rw-r--r--   kernel/irq/pm.c         |   2
-rw-r--r--   kernel/irq/proc.c       |   8
-rw-r--r--   kernel/irq/settings.h   |   7
-rw-r--r--   kernel/irq/spurious.c   | 118
12 files changed, 440 insertions(+), 106 deletions(-)
diff --git a/kernel/irq/Kconfig b/kernel/irq/Kconfig index 4a1fef09f65..d269cecdfbf 100644 --- a/kernel/irq/Kconfig +++ b/kernel/irq/Kconfig @@ -5,6 +5,10 @@ menu "IRQ subsystem"  config MAY_HAVE_SPARSE_IRQ         bool +# Legacy support, required for itanic +config GENERIC_IRQ_LEGACY +       bool +  # Enable the generic irq autoprobe mechanism  config GENERIC_IRQ_PROBE  	bool @@ -17,6 +21,11 @@ config GENERIC_IRQ_SHOW  config GENERIC_IRQ_SHOW_LEVEL         bool +# Facility to allocate a hardware interrupt. This is legacy support +# and should not be used in new code. Use irq domains instead. +config GENERIC_IRQ_LEGACY_ALLOC_HWIRQ +       bool +  # Support for delayed migration from interrupt context  config GENERIC_PENDING_IRQ  	bool @@ -40,6 +49,7 @@ config IRQ_EDGE_EOI_HANDLER  # Generic configurable interrupt chip implementation  config GENERIC_IRQ_CHIP         bool +       select IRQ_DOMAIN  # Generic irq_domain hw <--> linux irq number translation  config IRQ_DOMAIN diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c index a3bb14fbe5c..a2b28a2fd7b 100644 --- a/kernel/irq/chip.c +++ b/kernel/irq/chip.c @@ -40,10 +40,9 @@ int irq_set_chip(unsigned int irq, struct irq_chip *chip)  	irq_put_desc_unlock(desc, flags);  	/*  	 * For !CONFIG_SPARSE_IRQ make the irq show up in -	 * allocated_irqs. For the CONFIG_SPARSE_IRQ case, it is -	 * already marked, and this call is harmless. +	 * allocated_irqs.  	 */ -	irq_reserve_irq(irq); +	irq_mark_irq(irq);  	return 0;  }  EXPORT_SYMBOL(irq_set_chip); @@ -214,7 +213,7 @@ void irq_enable(struct irq_desc *desc)  }  /** - * irq_disable - Mark interupt disabled + * irq_disable - Mark interrupt disabled   * @desc:	irq descriptor which should be disabled   *   * If the chip does not implement the irq_disable callback, we @@ -281,6 +280,19 @@ void unmask_irq(struct irq_desc *desc)  	}  } +void unmask_threaded_irq(struct irq_desc *desc) +{ +	struct irq_chip *chip = desc->irq_data.chip; + +	if (chip->flags & IRQCHIP_EOI_THREADED) +		chip->irq_eoi(&desc->irq_data); + +	if (chip->irq_unmask) { +		chip->irq_unmask(&desc->irq_data); +		irq_state_clr_masked(desc); +	} +} +  /*   *	handle_nested_irq - Handle a nested irq from a irq thread   *	@irq:	the interrupt number @@ -435,6 +447,27 @@ static inline void preflow_handler(struct irq_desc *desc)  static inline void preflow_handler(struct irq_desc *desc) { }  #endif +static void cond_unmask_eoi_irq(struct irq_desc *desc, struct irq_chip *chip) +{ +	if (!(desc->istate & IRQS_ONESHOT)) { +		chip->irq_eoi(&desc->irq_data); +		return; +	} +	/* +	 * We need to unmask in the following cases: +	 * - Oneshot irq which did not wake the thread (caused by a +	 *   spurious interrupt or a primary handler handling it +	 *   completely). 
+	 */ +	if (!irqd_irq_disabled(&desc->irq_data) && +	    irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot) { +		chip->irq_eoi(&desc->irq_data); +		unmask_irq(desc); +	} else if (!(chip->flags & IRQCHIP_EOI_THREADED)) { +		chip->irq_eoi(&desc->irq_data); +	} +} +  /**   *	handle_fasteoi_irq - irq handler for transparent controllers   *	@irq:	the interrupt number @@ -448,6 +481,8 @@ static inline void preflow_handler(struct irq_desc *desc) { }  void  handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)  { +	struct irq_chip *chip = desc->irq_data.chip; +  	raw_spin_lock(&desc->lock);  	if (unlikely(irqd_irq_inprogress(&desc->irq_data))) @@ -473,18 +508,14 @@ handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)  	preflow_handler(desc);  	handle_irq_event(desc); -	if (desc->istate & IRQS_ONESHOT) -		cond_unmask_irq(desc); +	cond_unmask_eoi_irq(desc, chip); -out_eoi: -	desc->irq_data.chip->irq_eoi(&desc->irq_data); -out_unlock:  	raw_spin_unlock(&desc->lock);  	return;  out: -	if (!(desc->irq_data.chip->flags & IRQCHIP_EOI_IF_HANDLED)) -		goto out_eoi; -	goto out_unlock; +	if (!(chip->flags & IRQCHIP_EOI_IF_HANDLED)) +		chip->irq_eoi(&desc->irq_data); +	raw_spin_unlock(&desc->lock);  }  /** diff --git a/kernel/irq/devres.c b/kernel/irq/devres.c index bd8e788d71e..1ef0606797c 100644 --- a/kernel/irq/devres.c +++ b/kernel/irq/devres.c @@ -73,6 +73,51 @@ int devm_request_threaded_irq(struct device *dev, unsigned int irq,  EXPORT_SYMBOL(devm_request_threaded_irq);  /** + *	devm_request_any_context_irq - allocate an interrupt line for a managed device + *	@dev: device to request interrupt for + *	@irq: Interrupt line to allocate + *	@handler: Function to be called when the IRQ occurs + *	@thread_fn: function to be called in a threaded interrupt context. NULL + *		    for devices which handle everything in @handler + *	@irqflags: Interrupt type flags + *	@devname: An ascii name for the claiming device + *	@dev_id: A cookie passed back to the handler function + * + *	Except for the extra @dev argument, this function takes the + *	same arguments and performs the same function as + *	request_any_context_irq().  IRQs requested with this function will be + *	automatically freed on driver detach. + * + *	If an IRQ allocated with this function needs to be freed + *	separately, devm_free_irq() must be used. 
+ */ +int devm_request_any_context_irq(struct device *dev, unsigned int irq, +			      irq_handler_t handler, unsigned long irqflags, +			      const char *devname, void *dev_id) +{ +	struct irq_devres *dr; +	int rc; + +	dr = devres_alloc(devm_irq_release, sizeof(struct irq_devres), +			  GFP_KERNEL); +	if (!dr) +		return -ENOMEM; + +	rc = request_any_context_irq(irq, handler, irqflags, devname, dev_id); +	if (rc) { +		devres_free(dr); +		return rc; +	} + +	dr->irq = irq; +	dr->dev_id = dev_id; +	devres_add(dev, dr); + +	return 0; +} +EXPORT_SYMBOL(devm_request_any_context_irq); + +/**   *	devm_free_irq - free an interrupt   *	@dev: device to free interrupt for   *	@irq: Interrupt line to free diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c index 131ca176b49..63548027085 100644 --- a/kernel/irq/handle.c +++ b/kernel/irq/handle.c @@ -41,6 +41,7 @@ irqreturn_t no_action(int cpl, void *dev_id)  {  	return IRQ_NONE;  } +EXPORT_SYMBOL_GPL(no_action);  static void warn_no_thread(unsigned int irq, struct irqaction *action)  { @@ -51,7 +52,7 @@ static void warn_no_thread(unsigned int irq, struct irqaction *action)  	       "but no thread function available.", irq, action->name);  } -static void irq_wake_thread(struct irq_desc *desc, struct irqaction *action) +void __irq_wake_thread(struct irq_desc *desc, struct irqaction *action)  {  	/*  	 * In case the thread crashed and was killed we just pretend that @@ -157,7 +158,7 @@ handle_irq_event_percpu(struct irq_desc *desc, struct irqaction *action)  				break;  			} -			irq_wake_thread(desc, action); +			__irq_wake_thread(desc, action);  			/* Fall through to add to randomness */  		case IRQ_HANDLED: diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h index 001fa5bab49..099ea2e0eb8 100644 --- a/kernel/irq/internals.h +++ b/kernel/irq/internals.h @@ -6,6 +6,7 @@   * of this file for your non core code.   
*/  #include <linux/irqdesc.h> +#include <linux/kernel_stat.h>  #ifdef CONFIG_SPARSE_IRQ  # define IRQ_BITMAP_BITS	(NR_IRQS + 8196) @@ -32,7 +33,7 @@ enum {  };  /* - * Bit masks for desc->state + * Bit masks for desc->core_internal_state__do_not_mess_with_it   *   * IRQS_AUTODETECT		- autodetection in progress   * IRQS_SPURIOUS_DISABLED	- was disabled due to spurious interrupt @@ -73,6 +74,13 @@ extern void irq_percpu_enable(struct irq_desc *desc, unsigned int cpu);  extern void irq_percpu_disable(struct irq_desc *desc, unsigned int cpu);  extern void mask_irq(struct irq_desc *desc);  extern void unmask_irq(struct irq_desc *desc); +extern void unmask_threaded_irq(struct irq_desc *desc); + +#ifdef CONFIG_SPARSE_IRQ +static inline void irq_mark_irq(unsigned int irq) { } +#else +extern void irq_mark_irq(unsigned int irq); +#endif  extern void init_kstat_irqs(struct irq_desc *desc, int node, int nr); @@ -82,6 +90,7 @@ irqreturn_t handle_irq_event(struct irq_desc *desc);  /* Resending of interrupts :*/  void check_irq_resend(struct irq_desc *desc, unsigned int irq);  bool irq_wait_for_poll(struct irq_desc *desc); +void __irq_wake_thread(struct irq_desc *desc, struct irqaction *action);  #ifdef CONFIG_PROC_FS  extern void register_irq_proc(unsigned int irq, struct irq_desc *desc); @@ -179,3 +188,9 @@ static inline bool irqd_has_set(struct irq_data *d, unsigned int mask)  {  	return d->state_use_accessors & mask;  } + +static inline void kstat_incr_irqs_this_cpu(unsigned int irq, struct irq_desc *desc) +{ +	__this_cpu_inc(*desc->kstat_irqs); +	__this_cpu_inc(kstat.irqs_sum); +} diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c index 192a302d6cf..1487a123db5 100644 --- a/kernel/irq/irqdesc.c +++ b/kernel/irq/irqdesc.c @@ -274,10 +274,16 @@ struct irq_desc *irq_to_desc(unsigned int irq)  {  	return (irq < NR_IRQS) ? irq_desc + irq : NULL;  } +EXPORT_SYMBOL(irq_to_desc);  static void free_desc(unsigned int irq)  { -	dynamic_irq_cleanup(irq); +	struct irq_desc *desc = irq_to_desc(irq); +	unsigned long flags; + +	raw_spin_lock_irqsave(&desc->lock, flags); +	desc_set_defaults(irq, desc, desc_node(desc), NULL); +	raw_spin_unlock_irqrestore(&desc->lock, flags);  }  static inline int alloc_descs(unsigned int start, unsigned int cnt, int node, @@ -298,6 +304,20 @@ static int irq_expand_nr_irqs(unsigned int nr)  	return -ENOMEM;  } +void irq_mark_irq(unsigned int irq) +{ +	mutex_lock(&sparse_irq_lock); +	bitmap_set(allocated_irqs, irq, 1); +	mutex_unlock(&sparse_irq_lock); +} + +#ifdef CONFIG_GENERIC_IRQ_LEGACY +void irq_init_desc(unsigned int irq) +{ +	free_desc(irq); +} +#endif +  #endif /* !CONFIG_SPARSE_IRQ */  /** @@ -362,6 +382,13 @@ __irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node,  		if (from > irq)  			return -EINVAL;  		from = irq; +	} else { +		/* +		 * For interrupts which are freely allocated the +		 * architecture can force a lower bound to the @from +		 * argument. x86 uses this to exclude the GSI space. 
+		 */ +		from = arch_dynirq_lower_bound(from);  	}  	mutex_lock(&sparse_irq_lock); @@ -388,30 +415,56 @@ err:  }  EXPORT_SYMBOL_GPL(__irq_alloc_descs); +#ifdef CONFIG_GENERIC_IRQ_LEGACY_ALLOC_HWIRQ  /** - * irq_reserve_irqs - mark irqs allocated - * @from:	mark from irq number - * @cnt:	number of irqs to mark + * irq_alloc_hwirqs - Allocate an irq descriptor and initialize the hardware + * @cnt:	number of interrupts to allocate + * @node:	node on which to allocate   * - * Returns 0 on success or an appropriate error code + * Returns an interrupt number > 0 or 0, if the allocation fails.   */ -int irq_reserve_irqs(unsigned int from, unsigned int cnt) +unsigned int irq_alloc_hwirqs(int cnt, int node)  { -	unsigned int start; -	int ret = 0; +	int i, irq = __irq_alloc_descs(-1, 0, cnt, node, NULL); -	if (!cnt || (from + cnt) > nr_irqs) -		return -EINVAL; +	if (irq < 0) +		return 0; -	mutex_lock(&sparse_irq_lock); -	start = bitmap_find_next_zero_area(allocated_irqs, nr_irqs, from, cnt, 0); -	if (start == from) -		bitmap_set(allocated_irqs, start, cnt); -	else -		ret = -EEXIST; -	mutex_unlock(&sparse_irq_lock); -	return ret; +	for (i = irq; cnt > 0; i++, cnt--) { +		if (arch_setup_hwirq(i, node)) +			goto err; +		irq_clear_status_flags(i, _IRQ_NOREQUEST); +	} +	return irq; + +err: +	for (i--; i >= irq; i--) { +		irq_set_status_flags(i, _IRQ_NOREQUEST | _IRQ_NOPROBE); +		arch_teardown_hwirq(i); +	} +	irq_free_descs(irq, cnt); +	return 0; +} +EXPORT_SYMBOL_GPL(irq_alloc_hwirqs); + +/** + * irq_free_hwirqs - Free irq descriptor and cleanup the hardware + * @from:	Free from irq number + * @cnt:	number of interrupts to free + * + */ +void irq_free_hwirqs(unsigned int from, int cnt) +{ +	int i, j; + +	for (i = from, j = cnt; j > 0; i++, j--) { +		irq_set_status_flags(i, _IRQ_NOREQUEST | _IRQ_NOPROBE); +		arch_teardown_hwirq(i); +	} +	irq_free_descs(from, cnt);  } +EXPORT_SYMBOL_GPL(irq_free_hwirqs); +#endif  /**   * irq_get_next_irq - get next allocated irq number @@ -474,18 +527,9 @@ int irq_set_percpu_devid(unsigned int irq)  	return 0;  } -/** - * dynamic_irq_cleanup - cleanup a dynamically allocated irq - * @irq:	irq number to initialize - */ -void dynamic_irq_cleanup(unsigned int irq) +void kstat_incr_irq_this_cpu(unsigned int irq)  { -	struct irq_desc *desc = irq_to_desc(irq); -	unsigned long flags; - -	raw_spin_lock_irqsave(&desc->lock, flags); -	desc_set_defaults(irq, desc, desc_node(desc), NULL); -	raw_spin_unlock_irqrestore(&desc->lock, flags); +	kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq));  }  unsigned int kstat_irqs_cpu(unsigned int irq, int cpu) diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c index 706724e9835..eb5e10e32e0 100644 --- a/kernel/irq/irqdomain.c +++ b/kernel/irq/irqdomain.c @@ -10,6 +10,7 @@  #include <linux/mutex.h>  #include <linux/of.h>  #include <linux/of_address.h> +#include <linux/of_irq.h>  #include <linux/topology.h>  #include <linux/seq_file.h>  #include <linux/slab.h> @@ -26,14 +27,14 @@ static struct irq_domain *irq_default_domain;   * __irq_domain_add() - Allocate a new irq_domain data structure   * @of_node: optional device-tree node of the interrupt controller   * @size: Size of linear map; 0 for radix mapping only + * @hwirq_max: Maximum number of interrupts supported by controller   * @direct_max: Maximum value of direct maps; Use ~0 for no limit; 0 for no   *              direct mapping   * @ops: map/unmap domain callbacks   * @host_data: Controller private data pointer   * - * Allocates and initialize and irq_domain structure.  
Caller is expected to - * register allocated irq_domain with irq_domain_register().  Returns pointer - * to IRQ domain, or NULL on failure. + * Allocates and initialize and irq_domain structure. + * Returns pointer to IRQ domain, or NULL on failure.   */  struct irq_domain *__irq_domain_add(struct device_node *of_node, int size,  				    irq_hw_number_t hwirq_max, int direct_max, @@ -465,27 +466,26 @@ int irq_create_strict_mappings(struct irq_domain *domain, unsigned int irq_base,  }  EXPORT_SYMBOL_GPL(irq_create_strict_mappings); -unsigned int irq_create_of_mapping(struct device_node *controller, -				   const u32 *intspec, unsigned int intsize) +unsigned int irq_create_of_mapping(struct of_phandle_args *irq_data)  {  	struct irq_domain *domain;  	irq_hw_number_t hwirq;  	unsigned int type = IRQ_TYPE_NONE;  	unsigned int virq; -	domain = controller ? irq_find_host(controller) : irq_default_domain; +	domain = irq_data->np ? irq_find_host(irq_data->np) : irq_default_domain;  	if (!domain) {  		pr_warn("no irq domain found for %s !\n", -			of_node_full_name(controller)); +			of_node_full_name(irq_data->np));  		return 0;  	}  	/* If domain has no translation, then we assume interrupt line */  	if (domain->ops->xlate == NULL) -		hwirq = intspec[0]; +		hwirq = irq_data->args[0];  	else { -		if (domain->ops->xlate(domain, controller, intspec, intsize, -				     &hwirq, &type)) +		if (domain->ops->xlate(domain, irq_data->np, irq_data->args, +					irq_data->args_count, &hwirq, &type))  			return 0;  	} diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c index 514bcfd855a..3dc6a61bf06 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c @@ -32,24 +32,10 @@ static int __init setup_forced_irqthreads(char *arg)  early_param("threadirqs", setup_forced_irqthreads);  #endif -/** - *	synchronize_irq - wait for pending IRQ handlers (on other CPUs) - *	@irq: interrupt number to wait for - * - *	This function waits for any pending IRQ handlers for this interrupt - *	to complete before returning. If you use this function while - *	holding a resource the IRQ handler may need you will deadlock. - * - *	This function may be called - with care - from IRQ context. - */ -void synchronize_irq(unsigned int irq) +static void __synchronize_hardirq(struct irq_desc *desc)  { -	struct irq_desc *desc = irq_to_desc(irq);  	bool inprogress; -	if (!desc) -		return; -  	do {  		unsigned long flags; @@ -67,12 +53,56 @@ void synchronize_irq(unsigned int irq)  		/* Oops, that failed? */  	} while (inprogress); +} -	/* -	 * We made sure that no hardirq handler is running. Now verify -	 * that no threaded handlers are active. -	 */ -	wait_event(desc->wait_for_threads, !atomic_read(&desc->threads_active)); +/** + *	synchronize_hardirq - wait for pending hard IRQ handlers (on other CPUs) + *	@irq: interrupt number to wait for + * + *	This function waits for any pending hard IRQ handlers for this + *	interrupt to complete before returning. If you use this + *	function while holding a resource the IRQ handler may need you + *	will deadlock. It does not take associated threaded handlers + *	into account. + * + *	Do not use this for shutdown scenarios where you must be sure + *	that all parts (hardirq and threaded handler) have completed. + * + *	This function may be called - with care - from IRQ context. 
+ */ +void synchronize_hardirq(unsigned int irq) +{ +	struct irq_desc *desc = irq_to_desc(irq); + +	if (desc) +		__synchronize_hardirq(desc); +} +EXPORT_SYMBOL(synchronize_hardirq); + +/** + *	synchronize_irq - wait for pending IRQ handlers (on other CPUs) + *	@irq: interrupt number to wait for + * + *	This function waits for any pending IRQ handlers for this interrupt + *	to complete before returning. If you use this function while + *	holding a resource the IRQ handler may need you will deadlock. + * + *	This function may be called - with care - from IRQ context. + */ +void synchronize_irq(unsigned int irq) +{ +	struct irq_desc *desc = irq_to_desc(irq); + +	if (desc) { +		__synchronize_hardirq(desc); +		/* +		 * We made sure that no hardirq handler is +		 * running. Now verify that no threaded handlers are +		 * active. +		 */ +		wait_event(desc->wait_for_threads, +			   !atomic_read(&desc->threads_active)); +	}  }  EXPORT_SYMBOL(synchronize_irq); @@ -150,7 +180,7 @@ int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,  	struct irq_chip *chip = irq_data_get_irq_chip(data);  	int ret; -	ret = chip->irq_set_affinity(data, mask, false); +	ret = chip->irq_set_affinity(data, mask, force);  	switch (ret) {  	case IRQ_SET_MASK_OK:  		cpumask_copy(data->affinity, mask); @@ -162,7 +192,8 @@ int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,  	return ret;  } -int __irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask) +int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask, +			    bool force)  {  	struct irq_chip *chip = irq_data_get_irq_chip(data);  	struct irq_desc *desc = irq_data_to_desc(data); @@ -172,7 +203,7 @@ int __irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask)  		return -EINVAL;  	if (irq_can_move_pcntxt(data)) { -		ret = irq_do_set_affinity(data, mask, false); +		ret = irq_do_set_affinity(data, mask, force);  	} else {  		irqd_set_move_pending(data);  		irq_copy_pending(desc, mask); @@ -187,13 +218,7 @@ int __irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask)  	return ret;  } -/** - *	irq_set_affinity - Set the irq affinity of a given irq - *	@irq:		Interrupt to set affinity - *	@mask:		cpumask - * - */ -int irq_set_affinity(unsigned int irq, const struct cpumask *mask) +int __irq_set_affinity(unsigned int irq, const struct cpumask *mask, bool force)  {  	struct irq_desc *desc = irq_to_desc(irq);  	unsigned long flags; @@ -203,7 +228,7 @@ int irq_set_affinity(unsigned int irq, const struct cpumask *mask)  		return -EINVAL;  	raw_spin_lock_irqsave(&desc->lock, flags); -	ret =  __irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask); +	ret = irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask, force);  	raw_spin_unlock_irqrestore(&desc->lock, flags);  	return ret;  } @@ -718,7 +743,7 @@ again:  	if (!desc->threads_oneshot && !irqd_irq_disabled(&desc->irq_data) &&  	    irqd_irq_masked(&desc->irq_data)) -		unmask_irq(desc); +		unmask_threaded_irq(desc);  out_unlock:  	raw_spin_unlock_irq(&desc->lock); @@ -727,7 +752,7 @@ out_unlock:  #ifdef CONFIG_SMP  /* - * Check whether we need to chasnge the affinity of the interrupt thread. + * Check whether we need to change the affinity of the interrupt thread.   
*/  static void  irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) @@ -786,7 +811,7 @@ irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)  }  /* - * Interrupts explicitely requested as threaded interupts want to be + * Interrupts explicitly requested as threaded interrupts want to be   * preemtible - many of them need to sleep and wait for slow busses to   * complete.   */ @@ -802,8 +827,7 @@ static irqreturn_t irq_thread_fn(struct irq_desc *desc,  static void wake_threads_waitq(struct irq_desc *desc)  { -	if (atomic_dec_and_test(&desc->threads_active) && -	    waitqueue_active(&desc->wait_for_threads)) +	if (atomic_dec_and_test(&desc->threads_active))  		wake_up(&desc->wait_for_threads);  } @@ -862,8 +886,8 @@ static int irq_thread(void *data)  		irq_thread_check_affinity(desc, action);  		action_ret = handler_fn(desc, action); -		if (!noirqdebug) -			note_interrupt(action->irq, desc, action_ret); +		if (action_ret == IRQ_HANDLED) +			atomic_inc(&desc->threads_handled);  		wake_threads_waitq(desc);  	} @@ -881,6 +905,33 @@ static int irq_thread(void *data)  	return 0;  } +/** + *	irq_wake_thread - wake the irq thread for the action identified by dev_id + *	@irq:		Interrupt line + *	@dev_id:	Device identity for which the thread should be woken + * + */ +void irq_wake_thread(unsigned int irq, void *dev_id) +{ +	struct irq_desc *desc = irq_to_desc(irq); +	struct irqaction *action; +	unsigned long flags; + +	if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc))) +		return; + +	raw_spin_lock_irqsave(&desc->lock, flags); +	for (action = desc->action; action; action = action->next) { +		if (action->dev_id == dev_id) { +			if (action->thread) +				__irq_wake_thread(desc, action); +			break; +		} +	} +	raw_spin_unlock_irqrestore(&desc->lock, flags); +} +EXPORT_SYMBOL_GPL(irq_wake_thread); +  static void irq_setup_forced_threading(struct irqaction *new)  {  	if (!force_irqthreads) @@ -897,6 +948,23 @@ static void irq_setup_forced_threading(struct irqaction *new)  	}  } +static int irq_request_resources(struct irq_desc *desc) +{ +	struct irq_data *d = &desc->irq_data; +	struct irq_chip *c = d->chip; + +	return c->irq_request_resources ? c->irq_request_resources(d) : 0; +} + +static void irq_release_resources(struct irq_desc *desc) +{ +	struct irq_data *d = &desc->irq_data; +	struct irq_chip *c = d->chip; + +	if (c->irq_release_resources) +		c->irq_release_resources(d); +} +  /*   * Internal function to register an irqaction - typically used to   * allocate special interrupts that are part of the architecture. 
@@ -956,7 +1024,7 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)  			goto out_mput;  		} -		sched_setscheduler(t, SCHED_FIFO, ¶m); +		sched_setscheduler_nocheck(t, SCHED_FIFO, ¶m);  		/*  		 * We keep the reference to the task struct even if @@ -1092,6 +1160,13 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)  	}  	if (!shared) { +		ret = irq_request_resources(desc); +		if (ret) { +			pr_err("Failed to request resources for %s (irq %d) on irqchip %s\n", +			       new->name, irq, desc->irq_data.chip->name); +			goto out_mask; +		} +  		init_waitqueue_head(&desc->wait_for_threads);  		/* Setup the type (level, edge polarity) if configured: */ @@ -1262,8 +1337,10 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id)  	*action_ptr = action->next;  	/* If this was the last handler, shut down the IRQ line: */ -	if (!desc->action) +	if (!desc->action) {  		irq_shutdown(desc); +		irq_release_resources(desc); +	}  #ifdef CONFIG_SMP  	/* make sure affinity_hint is cleaned up */ diff --git a/kernel/irq/pm.c b/kernel/irq/pm.c index cb228bf2176..abcd6ca86cb 100644 --- a/kernel/irq/pm.c +++ b/kernel/irq/pm.c @@ -50,7 +50,7 @@ static void resume_irqs(bool want_early)  		bool is_early = desc->action &&  			desc->action->flags & IRQF_EARLY_RESUME; -		if (is_early != want_early) +		if (!is_early && want_early)  			continue;  		raw_spin_lock_irqsave(&desc->lock, flags); diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c index 36f6ee181b0..ac1ba2f1103 100644 --- a/kernel/irq/proc.c +++ b/kernel/irq/proc.c @@ -324,15 +324,15 @@ void register_irq_proc(unsigned int irq, struct irq_desc *desc)  #ifdef CONFIG_SMP  	/* create /proc/irq/<irq>/smp_affinity */ -	proc_create_data("smp_affinity", 0600, desc->dir, +	proc_create_data("smp_affinity", 0644, desc->dir,  			 &irq_affinity_proc_fops, (void *)(long)irq);  	/* create /proc/irq/<irq>/affinity_hint */ -	proc_create_data("affinity_hint", 0400, desc->dir, +	proc_create_data("affinity_hint", 0444, desc->dir,  			 &irq_affinity_hint_proc_fops, (void *)(long)irq);  	/* create /proc/irq/<irq>/smp_affinity_list */ -	proc_create_data("smp_affinity_list", 0600, desc->dir, +	proc_create_data("smp_affinity_list", 0644, desc->dir,  			 &irq_affinity_list_proc_fops, (void *)(long)irq);  	proc_create_data("node", 0444, desc->dir, @@ -372,7 +372,7 @@ void unregister_handler_proc(unsigned int irq, struct irqaction *action)  static void register_default_affinity_proc(void)  {  #ifdef CONFIG_SMP -	proc_create("irq/default_smp_affinity", 0600, NULL, +	proc_create("irq/default_smp_affinity", 0644, NULL,  		    &default_affinity_proc_fops);  #endif  } diff --git a/kernel/irq/settings.h b/kernel/irq/settings.h index 1162f1030f1..3320b84cc60 100644 --- a/kernel/irq/settings.h +++ b/kernel/irq/settings.h @@ -14,6 +14,7 @@ enum {  	_IRQ_NO_BALANCING	= IRQ_NO_BALANCING,  	_IRQ_NESTED_THREAD	= IRQ_NESTED_THREAD,  	_IRQ_PER_CPU_DEVID	= IRQ_PER_CPU_DEVID, +	_IRQ_IS_POLLED		= IRQ_IS_POLLED,  	_IRQF_MODIFY_MASK	= IRQF_MODIFY_MASK,  }; @@ -26,6 +27,7 @@ enum {  #define IRQ_NOAUTOEN		GOT_YOU_MORON  #define IRQ_NESTED_THREAD	GOT_YOU_MORON  #define IRQ_PER_CPU_DEVID	GOT_YOU_MORON +#define IRQ_IS_POLLED		GOT_YOU_MORON  #undef IRQF_MODIFY_MASK  #define IRQF_MODIFY_MASK	GOT_YOU_MORON @@ -147,3 +149,8 @@ static inline bool irq_settings_is_nested_thread(struct irq_desc *desc)  {  	return desc->status_use_accessors & _IRQ_NESTED_THREAD;  } + +static inline bool irq_settings_is_polled(struct irq_desc *desc) +{ +	return 
desc->status_use_accessors & _IRQ_IS_POLLED; +} diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c index 7b5f012bde9..e2514b0e439 100644 --- a/kernel/irq/spurious.c +++ b/kernel/irq/spurious.c @@ -67,8 +67,13 @@ static int try_one_irq(int irq, struct irq_desc *desc, bool force)  	raw_spin_lock(&desc->lock); -	/* PER_CPU and nested thread interrupts are never polled */ -	if (irq_settings_is_per_cpu(desc) || irq_settings_is_nested_thread(desc)) +	/* +	 * PER_CPU, nested thread interrupts and interrupts explicitely +	 * marked polled are excluded from polling. +	 */ +	if (irq_settings_is_per_cpu(desc) || +	    irq_settings_is_nested_thread(desc) || +	    irq_settings_is_polled(desc))  		goto out;  	/* @@ -265,14 +270,13 @@ try_misrouted_irq(unsigned int irq, struct irq_desc *desc,  	return action && (action->flags & IRQF_IRQPOLL);  } +#define SPURIOUS_DEFERRED	0x80000000 +  void note_interrupt(unsigned int irq, struct irq_desc *desc,  		    irqreturn_t action_ret)  { -	if (desc->istate & IRQS_POLL_INPROGRESS) -		return; - -	/* we get here again via the threaded handler */ -	if (action_ret == IRQ_WAKE_THREAD) +	if (desc->istate & IRQS_POLL_INPROGRESS || +	    irq_settings_is_polled(desc))  		return;  	if (bad_action_ret(action_ret)) { @@ -280,6 +284,106 @@ void note_interrupt(unsigned int irq, struct irq_desc *desc,  		return;  	} +	/* +	 * We cannot call note_interrupt from the threaded handler +	 * because we need to look at the compound of all handlers +	 * (primary and threaded). Aside of that in the threaded +	 * shared case we have no serialization against an incoming +	 * hardware interrupt while we are dealing with a threaded +	 * result. +	 * +	 * So in case a thread is woken, we just note the fact and +	 * defer the analysis to the next hardware interrupt. +	 * +	 * The threaded handlers store whether they sucessfully +	 * handled an interrupt and we check whether that number +	 * changed versus the last invocation. +	 * +	 * We could handle all interrupts with the delayed by one +	 * mechanism, but for the non forced threaded case we'd just +	 * add pointless overhead to the straight hardirq interrupts +	 * for the sake of a few lines less code. +	 */ +	if (action_ret & IRQ_WAKE_THREAD) { +		/* +		 * There is a thread woken. Check whether one of the +		 * shared primary handlers returned IRQ_HANDLED. If +		 * not we defer the spurious detection to the next +		 * interrupt. +		 */ +		if (action_ret == IRQ_WAKE_THREAD) { +			int handled; +			/* +			 * We use bit 31 of thread_handled_last to +			 * denote the deferred spurious detection +			 * active. No locking necessary as +			 * thread_handled_last is only accessed here +			 * and we have the guarantee that hard +			 * interrupts are not reentrant. +			 */ +			if (!(desc->threads_handled_last & SPURIOUS_DEFERRED)) { +				desc->threads_handled_last |= SPURIOUS_DEFERRED; +				return; +			} +			/* +			 * Check whether one of the threaded handlers +			 * returned IRQ_HANDLED since the last +			 * interrupt happened. +			 * +			 * For simplicity we just set bit 31, as it is +			 * set in threads_handled_last as well. So we +			 * avoid extra masking. And we really do not +			 * care about the high bits of the handled +			 * count. We just care about the count being +			 * different than the one we saw before. 
+			 */ +			handled = atomic_read(&desc->threads_handled); +			handled |= SPURIOUS_DEFERRED; +			if (handled != desc->threads_handled_last) { +				action_ret = IRQ_HANDLED; +				/* +				 * Note: We keep the SPURIOUS_DEFERRED +				 * bit set. We are handling the +				 * previous invocation right now. +				 * Keep it for the current one, so the +				 * next hardware interrupt will +				 * account for it. +				 */ +				desc->threads_handled_last = handled; +			} else { +				/* +				 * None of the threaded handlers felt +				 * responsible for the last interrupt +				 * +				 * We keep the SPURIOUS_DEFERRED bit +				 * set in threads_handled_last as we +				 * need to account for the current +				 * interrupt as well. +				 */ +				action_ret = IRQ_NONE; +			} +		} else { +			/* +			 * One of the primary handlers returned +			 * IRQ_HANDLED. So we don't care about the +			 * threaded handlers on the same line. Clear +			 * the deferred detection bit. +			 * +			 * In theory we could/should check whether the +			 * deferred bit is set and take the result of +			 * the previous run into account here as +			 * well. But it's really not worth the +			 * trouble. If every other interrupt is +			 * handled we never trigger the spurious +			 * detector. And if this is just the one out +			 * of 100k unhandled ones which is handled +			 * then we merily delay the spurious detection +			 * by one hard interrupt. Not a real problem. +			 */ +			desc->threads_handled_last &= ~SPURIOUS_DEFERRED; +		} +	} +  	if (unlikely(action_ret == IRQ_NONE)) {  		/*  		 * If we are seeing only the odd spurious IRQ caused by  | 
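The chip.c hunks (cond_unmask_eoi_irq(), unmask_threaded_irq()) introduce the IRQCHIP_EOI_THREADED chip flag: for a oneshot interrupt with a threaded handler the EOI is no longer issued from the hard interrupt path but deferred until irq_finalize_oneshot() calls unmask_threaded_irq(), which sends the EOI and then unmasks. A minimal sketch of a fasteoi-type chip opting in; the foo_* names and the empty callbacks are placeholders, not part of the patch:

```c
#include <linux/irq.h>

static void foo_irq_mask(struct irq_data *d)   { /* mask the line in hardware */ }
static void foo_irq_unmask(struct irq_data *d) { /* unmask the line in hardware */ }
static void foo_irq_eoi(struct irq_data *d)    { /* signal end-of-interrupt */ }

static struct irq_chip foo_eoi_chip = {
	.name		= "foo-eoi",
	.irq_mask	= foo_irq_mask,
	.irq_unmask	= foo_irq_unmask,
	.irq_eoi	= foo_irq_eoi,
	/*
	 * With this flag, handle_fasteoi_irq() leaves a oneshot interrupt
	 * masked and without EOI while the thread runs; EOI and unmask then
	 * happen together in unmask_threaded_irq() once the thread is done.
	 */
	.flags		= IRQCHIP_EOI_THREADED,
};
```

Such a chip would typically be installed with irq_set_chip_and_handler(irq, &foo_eoi_chip, handle_fasteoi_irq).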
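The devres.c hunk adds devm_request_any_context_irq(), the managed counterpart of request_any_context_irq() for lines that may be backed either by a real hardirq or by a nested thread (e.g. behind a slow GPIO expander). A probe-time sketch; the foo driver, its status register and the handler are illustrative assumptions, only the devm_request_any_context_irq() call itself comes from the patch:

```c
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/io.h>

struct foo_device {
	void __iomem *regs;
};

/*
 * Hypothetical handler: depending on the parent irqchip this runs either
 * as a hard IRQ handler or in a nested thread - that is the point of the
 * "any context" variant.
 */
static irqreturn_t foo_irq_handler(int irq, void *dev_id)
{
	struct foo_device *foo = dev_id;

	if (!readl(foo->regs + 0x04))	/* hypothetical status register */
		return IRQ_NONE;

	/* ... handle the event ... */
	return IRQ_HANDLED;
}

static int foo_probe(struct platform_device *pdev)
{
	struct foo_device *foo;
	int irq, ret;

	foo = devm_kzalloc(&pdev->dev, sizeof(*foo), GFP_KERNEL);
	if (!foo)
		return -ENOMEM;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	/* Released automatically on driver detach, like other devm_* resources. */
	ret = devm_request_any_context_irq(&pdev->dev, irq, foo_irq_handler,
					   0, dev_name(&pdev->dev), foo);
	if (ret < 0)
		return ret;

	return 0;
}
```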
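irq_alloc_hwirqs()/irq_free_hwirqs(), guarded by GENERIC_IRQ_LEGACY_ALLOC_HWIRQ, replace the old irq_reserve_irqs() for architectures that still hand out hardware interrupts directly; the Kconfig comment is explicit that new code should use irq domains instead. A rough sketch of the calling convention, assuming an architecture that selects the option and provides arch_setup_hwirq()/arch_teardown_hwirq():

```c
#include <linux/irq.h>
#include <linux/numa.h>

static int example_alloc_two_hwirqs(void)
{
	unsigned int irq;

	/*
	 * Allocate two consecutive interrupts on any node; the core calls
	 * arch_setup_hwirq() for each of them.  A return of 0 means failure.
	 */
	irq = irq_alloc_hwirqs(2, NUMA_NO_NODE);
	if (!irq)
		return -ENOSPC;

	/* ... wire irq and irq + 1 up to the device here ... */

	/* Tear down the descriptors and the architecture state again. */
	irq_free_hwirqs(irq, 2);
	return 0;
}
```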
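In irqdomain.c, irq_create_of_mapping() now takes a struct of_phandle_args instead of a controller node plus raw intspec. Callers normally fill that structure with of_irq_parse_one(); a sketch of mapping interrupt index 0 of a device-tree node (roughly what irq_of_parse_and_map() does internally; foo_map_first_irq() is a made-up helper):

```c
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/irqdomain.h>

static unsigned int foo_map_first_irq(struct device_node *np)
{
	struct of_phandle_args oirq;

	/* Parse the "interrupts" specifier for index 0 into np/args/args_count. */
	if (of_irq_parse_one(np, 0, &oirq))
		return 0;

	/* Translate (controller node, specifier) into a Linux irq number. */
	return irq_create_of_mapping(&oirq);
}
```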
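manage.c splits the hard-IRQ wait out of synchronize_irq() into synchronize_hardirq(): the new function only waits for an in-flight hard handler and deliberately ignores threaded handlers, so it is usable from atomic context but not sufficient for shutdown. A sketch of the distinction in a quiesce path; the register offset and foo naming are invented:

```c
#include <linux/interrupt.h>
#include <linux/io.h>

static void foo_quiesce(void __iomem *regs, unsigned int irq)
{
	/* Tell the device to stop raising interrupts (device-specific). */
	writel(0, regs + 0x10);	/* hypothetical interrupt-enable register */

	/*
	 * Wait for a hard handler that may still be running on another CPU.
	 * Does NOT wait for a threaded handler, so it can be called - with
	 * care - from atomic context.
	 */
	synchronize_hardirq(irq);
}

static void foo_shutdown(void __iomem *regs, unsigned int irq)
{
	writel(0, regs + 0x10);

	/* Full shutdown: also waits for threads_active to reach zero; may sleep. */
	synchronize_irq(irq);
}
```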
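The affinity changes in manage.c thread a force flag through irq_do_set_affinity() and irq_set_affinity_locked() and turn the old irq_set_affinity() body into __irq_set_affinity(irq, mask, force). The user-visible wrappers live in <linux/interrupt.h>: irq_set_affinity() passes force=false and irq_force_affinity() passes force=true, so that e.g. a per-CPU wakeup interrupt can be bound to a CPU that is not online yet. A sketch of the forced variant; the hotplug scenario is illustrative, and irq_force_affinity() itself belongs to the same series but is not visible in this diff:

```c
#include <linux/interrupt.h>
#include <linux/cpumask.h>

static int foo_bind_wakeup_irq(unsigned int irq, unsigned int cpu)
{
	/*
	 * Force the affinity even if the target CPU is not online yet,
	 * e.g. when setting up a per-CPU wakeup interrupt from the CPU
	 * hotplug path.  Plain irq_set_affinity() would use force=false.
	 */
	return irq_force_affinity(irq, cpumask_of(cpu));
}
```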
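irq_wake_thread(), built on the now-shared __irq_wake_thread(), lets a driver wake the threaded handler registered for a given (irq, dev_id) pair from outside the primary handler. A sketch; the DMA-completion scenario, foo_device and its fields are assumptions:

```c
#include <linux/interrupt.h>

struct foo_device {
	unsigned int irq;	/* line registered with request_threaded_irq() */
};

/*
 * Hypothetical completion callback running outside the interrupt
 * handlers (e.g. a DMA callback): kick the threaded handler of the
 * device interrupt so it re-evaluates the device state.
 */
static void foo_dma_complete(void *data)
{
	struct foo_device *foo = data;

	/* Only the action whose dev_id matches 'foo' has its thread woken. */
	irq_wake_thread(foo->irq, foo);
}
```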
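__setup_irq() and __free_irq() now call the new irq_chip callbacks irq_request_resources() and irq_release_resources() when the first action is installed and the last one is removed. A sketch of an irqchip using them to keep a line from being claimed twice; the foo_chip bookkeeping is a made-up example of what such a callback might guard:

```c
#include <linux/irq.h>
#include <linux/bitops.h>
#include <linux/errno.h>

struct foo_chip {
	unsigned long busy;	/* one bit per hwirq, chip-private */
};

static int foo_irq_request_resources(struct irq_data *d)
{
	struct foo_chip *fc = irq_data_get_irq_chip_data(d);

	/* Called once from __setup_irq(); a non-zero return fails request_irq(). */
	if (test_and_set_bit(d->hwirq, &fc->busy))
		return -EBUSY;
	return 0;
}

static void foo_irq_release_resources(struct irq_data *d)
{
	struct foo_chip *fc = irq_data_get_irq_chip_data(d);

	/* Called from __free_irq() after the last action went away. */
	clear_bit(d->hwirq, &fc->busy);
}

static struct irq_chip foo_res_chip = {
	.name			= "foo-res",
	.irq_request_resources	= foo_irq_request_resources,
	.irq_release_resources	= foo_irq_release_resources,
	/* .irq_mask / .irq_unmask / ... omitted for brevity */
};
```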
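settings.h and spurious.c add IRQ_IS_POLLED: an interrupt a driver explicitly services by polling is skipped by try_one_irq() and by note_interrupt(), so it can no longer trip the spurious detector. The flag is set through the existing irq_set_status_flags() helper; a short sketch with a made-up helper name:

```c
#include <linux/irq.h>

static void foo_mark_irq_polled(unsigned int irq)
{
	/*
	 * The driver fields this interrupt by polling, so the misrouted-IRQ
	 * poller and the spurious detector should leave the line alone.
	 */
	irq_set_status_flags(irq, IRQ_IS_POLLED);
}
```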
