author	Ingo Molnar <mingo@elte.hu>	2008-10-28 16:54:49 +0100
committer	Ingo Molnar <mingo@elte.hu>	2008-10-28 16:54:49 +0100
commit	d1a76187a5be4f89c6cb19d800cb5fb7aac735c5 (patch)
tree	2fac3ffbfffc7560eeef8364b541d0d7a0057920 /arch/powerpc/kernel/irq.c
parent	c7e78cff6b7518212247fb20b1dc6411540dc9af (diff)
parent	0173a3265b228da319ceb9c1ec6a5682fd1b2d92 (diff)
Merge commit 'v2.6.28-rc2' into core/locking
Conflicts: arch/um/include/asm/system.h
Diffstat (limited to 'arch/powerpc/kernel/irq.c')
-rw-r--r--	arch/powerpc/kernel/irq.c	169
1 files changed, 85 insertions(+), 84 deletions(-)
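The patch below drops the hand-rolled brlock-style protection around the IRQ reverse-map radix trees in favour of a writer-side mutex (revmap_trees_mutex) plus a staged flag (revmap_trees_allocated) published with smp_wmb() and checked with smp_rmb(): stage 1 means the trees are initialized and inserts/deletes may touch them under the mutex, stage 2 means they have been back-filled with the existing mappings and lock-free lookups may trust them. The following is a minimal userspace model of that scheme, using C11 atomics in place of the kernel barriers; all names here (table, slow_lookup, late_init, lookup, trees_allocated, trees_mutex) are invented for illustration and do not appear in the patch.

#include <stdatomic.h>
#include <pthread.h>
#include <stdio.h>

/* Stand-ins: `table` plays the radix tree, slow_lookup() the linear
 * irq_find_mapping() fallback. */
static int table[16];
static atomic_int trees_allocated;	/* models revmap_trees_allocated */
static pthread_mutex_t trees_mutex = PTHREAD_MUTEX_INITIALIZER;	/* models revmap_trees_mutex */

static int slow_lookup(int hwirq) { return hwirq; }	/* linear-scan fallback */

/* Writer side (what irq_late_init() does in the patch): publish stage 1 so
 * inserts may run, back-fill existing mappings under the mutex, then
 * publish stage 2. */
static void late_init(void)
{
	/* stage 0 -> 1: structure exists, inserts/deletes allowed under the mutex */
	atomic_store_explicit(&trees_allocated, 1, memory_order_release);

	pthread_mutex_lock(&trees_mutex);
	for (int i = 0; i < 16; i++)
		table[i] = i;			/* back-fill mappings created earlier */
	pthread_mutex_unlock(&trees_mutex);

	/* stage 1 -> 2: contents complete, lock-free lookups may trust them */
	atomic_store_explicit(&trees_allocated, 2, memory_order_release);
}

/* Reader side (what irq_radix_revmap_lookup() does): no lock, only an
 * acquire load of the stage flag before trusting the table. */
static int lookup(int hwirq)
{
	if (atomic_load_explicit(&trees_allocated, memory_order_acquire) < 2)
		return slow_lookup(hwirq);
	return table[hwirq];
}

int main(void)
{
	printf("%d\n", lookup(5));	/* before init: served by the fallback */
	late_init();
	printf("%d\n", lookup(5));	/* after init: served from the table */
	return 0;
}
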
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index d972decf032..ac222d0ab12 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -439,8 +439,8 @@ void do_softirq(void)
static LIST_HEAD(irq_hosts);
static DEFINE_SPINLOCK(irq_big_lock);
-static DEFINE_PER_CPU(unsigned int, irq_radix_reader);
-static unsigned int irq_radix_writer;
+static unsigned int revmap_trees_allocated;
+static DEFINE_MUTEX(revmap_trees_mutex);
struct irq_map_entry irq_map[NR_IRQS];
static unsigned int irq_virq_count = NR_IRQS;
static struct irq_host *irq_default_host;
@@ -583,57 +583,6 @@ void irq_set_virq_count(unsigned int count)
irq_virq_count = count;
}
-/* radix tree not lockless safe ! we use a brlock-type mecanism
- * for now, until we can use a lockless radix tree
- */
-static void irq_radix_wrlock(unsigned long *flags)
-{
- unsigned int cpu, ok;
-
- spin_lock_irqsave(&irq_big_lock, *flags);
- irq_radix_writer = 1;
- smp_mb();
- do {
- barrier();
- ok = 1;
- for_each_possible_cpu(cpu) {
- if (per_cpu(irq_radix_reader, cpu)) {
- ok = 0;
- break;
- }
- }
- if (!ok)
- cpu_relax();
- } while(!ok);
-}
-
-static void irq_radix_wrunlock(unsigned long flags)
-{
- smp_wmb();
- irq_radix_writer = 0;
- spin_unlock_irqrestore(&irq_big_lock, flags);
-}
-
-static void irq_radix_rdlock(unsigned long *flags)
-{
- local_irq_save(*flags);
- __get_cpu_var(irq_radix_reader) = 1;
- smp_mb();
- if (likely(irq_radix_writer == 0))
- return;
- __get_cpu_var(irq_radix_reader) = 0;
- smp_wmb();
- spin_lock(&irq_big_lock);
- __get_cpu_var(irq_radix_reader) = 1;
- spin_unlock(&irq_big_lock);
-}
-
-static void irq_radix_rdunlock(unsigned long flags)
-{
- __get_cpu_var(irq_radix_reader) = 0;
- local_irq_restore(flags);
-}
-
static int irq_setup_virq(struct irq_host *host, unsigned int virq,
irq_hw_number_t hwirq)
{
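The hunk above removes the open-coded big-reader-lock: a reader raises a per-CPU flag (irq_radix_reader) and proceeds unless a writer (irq_radix_writer) is active, in which case it backs off and synchronises through irq_big_lock; a writer takes irq_big_lock and spins until no per-CPU reader flag remains set. A rough userspace model of that discipline, with pthreads and C11 atomics standing in for the per-CPU variables, spinlock, and barriers; the names wrlock, rdlock, reader_active, writer_active, big_lock are illustrative only.

#include <stdatomic.h>
#include <stdbool.h>
#include <pthread.h>
#include <sched.h>

#define NCPUS 4

static atomic_int reader_active[NCPUS];	/* models per-CPU irq_radix_reader */
static atomic_int writer_active;	/* models irq_radix_writer */
static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER;	/* models irq_big_lock */

/* Writer: take the big lock, announce the write, then wait for a pass in
 * which no reader slot is marked active (the removed irq_radix_wrlock()
 * did the same, with cpu_relax() instead of sched_yield()). */
static void wrlock(void)
{
	pthread_mutex_lock(&big_lock);
	atomic_store(&writer_active, 1);
	for (;;) {
		bool ok = true;
		for (int cpu = 0; cpu < NCPUS; cpu++)
			if (atomic_load(&reader_active[cpu]))
				ok = false;
		if (ok)
			break;
		sched_yield();
	}
}

static void wrunlock(void)
{
	atomic_store(&writer_active, 0);
	pthread_mutex_unlock(&big_lock);
}

/* Reader fast path: mark the slot and go ahead unless a writer is active;
 * slow path: back off and synchronise through the big lock instead. */
static void rdlock(int cpu)
{
	atomic_store(&reader_active[cpu], 1);
	if (!atomic_load(&writer_active))
		return;
	atomic_store(&reader_active[cpu], 0);
	pthread_mutex_lock(&big_lock);
	atomic_store(&reader_active[cpu], 1);
	pthread_mutex_unlock(&big_lock);
}

static void rdunlock(int cpu)
{
	atomic_store(&reader_active[cpu], 0);
}

int main(void)
{
	rdlock(0);	/* uncontended reader touches only its own flag */
	rdunlock(0);
	wrlock();	/* writer excludes all readers for the tree update */
	wrunlock();
	return 0;
}
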
@@ -788,7 +737,6 @@ void irq_dispose_mapping(unsigned int virq)
{
struct irq_host *host;
irq_hw_number_t hwirq;
- unsigned long flags;
if (virq == NO_IRQ)
return;
@@ -821,12 +769,16 @@ void irq_dispose_mapping(unsigned int virq)
host->revmap_data.linear.revmap[hwirq] = NO_IRQ;
break;
case IRQ_HOST_MAP_TREE:
- /* Check if radix tree allocated yet */
- if (host->revmap_data.tree.gfp_mask == 0)
+ /*
+ * Check if radix tree allocated yet, if not then nothing to
+ * remove.
+ */
+ smp_rmb();
+ if (revmap_trees_allocated < 1)
break;
- irq_radix_wrlock(&flags);
+ mutex_lock(&revmap_trees_mutex);
radix_tree_delete(&host->revmap_data.tree, hwirq);
- irq_radix_wrunlock(flags);
+ mutex_unlock(&revmap_trees_mutex);
break;
}
@@ -875,43 +827,62 @@ unsigned int irq_find_mapping(struct irq_host *host,
EXPORT_SYMBOL_GPL(irq_find_mapping);
-unsigned int irq_radix_revmap(struct irq_host *host,
- irq_hw_number_t hwirq)
+unsigned int irq_radix_revmap_lookup(struct irq_host *host,
+ irq_hw_number_t hwirq)
{
- struct radix_tree_root *tree;
struct irq_map_entry *ptr;
unsigned int virq;
- unsigned long flags;
WARN_ON(host->revmap_type != IRQ_HOST_MAP_TREE);
- /* Check if the radix tree exist yet. We test the value of
- * the gfp_mask for that. Sneaky but saves another int in the
- * structure. If not, we fallback to slow mode
+ /*
+ * Check if the radix tree exists and has been initialized.
+ * If not, we fallback to slow mode
*/
- tree = &host->revmap_data.tree;
- if (tree->gfp_mask == 0)
+ if (revmap_trees_allocated < 2)
return irq_find_mapping(host, hwirq);
/* Now try to resolve */
- irq_radix_rdlock(&flags);
- ptr = radix_tree_lookup(tree, hwirq);
- irq_radix_rdunlock(flags);
+ /*
+ * No rcu_read_lock(ing) needed, the ptr returned can't go under us
+ * as it's referencing an entry in the static irq_map table.
+ */
+ ptr = radix_tree_lookup(&host->revmap_data.tree, hwirq);
- /* Found it, return */
- if (ptr) {
+ /*
+ * If found in radix tree, then fine.
+ * Else fallback to linear lookup - this should not happen in practice
+ * as it means that we failed to insert the node in the radix tree.
+ */
+ if (ptr)
virq = ptr - irq_map;
- return virq;
- }
+ else
+ virq = irq_find_mapping(host, hwirq);
+
+ return virq;
+}
+
+void irq_radix_revmap_insert(struct irq_host *host, unsigned int virq,
+ irq_hw_number_t hwirq)
+{
+
+ WARN_ON(host->revmap_type != IRQ_HOST_MAP_TREE);
+
+ /*
+ * Check if the radix tree exists yet.
+ * If not, then the irq will be inserted into the tree when it gets
+ * initialized.
+ */
+ smp_rmb();
+ if (revmap_trees_allocated < 1)
+ return;
- /* If not there, try to insert it */
- virq = irq_find_mapping(host, hwirq);
if (virq != NO_IRQ) {
- irq_radix_wrlock(&flags);
- radix_tree_insert(tree, hwirq, &irq_map[virq]);
- irq_radix_wrunlock(flags);
+ mutex_lock(&revmap_trees_mutex);
+ radix_tree_insert(&host->revmap_data.tree, hwirq,
+ &irq_map[virq]);
+ mutex_unlock(&revmap_trees_mutex);
}
- return virq;
}
unsigned int irq_linear_revmap(struct irq_host *host,
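The rewritten lookup above leans on the fact that the radix tree stores pointers into the static irq_map[] array: the pointed-to entry can never be freed under a lock-free reader (hence the "no rcu_read_lock() needed" comment), and the virq is recovered by plain pointer subtraction, virq = ptr - irq_map. A tiny standalone illustration of that trick; struct map_entry, map, and stored are invented names, not the kernel's.

#include <stdio.h>

struct map_entry { unsigned long hwirq; };

static struct map_entry map[8];		/* models the static irq_map[] */
static struct map_entry *stored;	/* models what the radix tree holds */

int main(void)
{
	unsigned int virq = 5;
	map[virq].hwirq = 0x42;

	stored = &map[virq];			/* insert: keep a pointer to the slot */

	unsigned int found = stored - map;	/* lookup: ptr - irq_map recovers virq */
	printf("virq=%u hwirq=%#lx\n", found, map[found].hwirq);
	return 0;
}
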
@@ -1020,14 +991,44 @@ void irq_early_init(void)
static int irq_late_init(void)
{
struct irq_host *h;
- unsigned long flags;
+ unsigned int i;
- irq_radix_wrlock(&flags);
+ /*
+ * No mutual exclusion with respect to accessors of the tree is needed
+ * here as the synchronization is done via the state variable
+ * revmap_trees_allocated.
+ */
list_for_each_entry(h, &irq_hosts, link) {
if (h->revmap_type == IRQ_HOST_MAP_TREE)
- INIT_RADIX_TREE(&h->revmap_data.tree, GFP_ATOMIC);
+ INIT_RADIX_TREE(&h->revmap_data.tree, GFP_KERNEL);
+ }
+
+ /*
+ * Make sure the radix trees inits are visible before setting
+ * the flag
+ */
+ smp_wmb();
+ revmap_trees_allocated = 1;
+
+ /*
+ * Insert the reverse mapping for those interrupts already present
+ * in irq_map[].
+ */
+ mutex_lock(&revmap_trees_mutex);
+ for (i = 0; i < irq_virq_count; i++) {
+ if (irq_map[i].host &&
+ (irq_map[i].host->revmap_type == IRQ_HOST_MAP_TREE))
+ radix_tree_insert(&irq_map[i].host->revmap_data.tree,
+ irq_map[i].hwirq, &irq_map[i]);
}
- irq_radix_wrunlock(flags);
+ mutex_unlock(&revmap_trees_mutex);
+
+ /*
+ * Make sure the radix trees insertions are visible before setting
+ * the flag
+ */
+ smp_wmb();
+ revmap_trees_allocated = 2;
return 0;
}