/* $Id: irq.c,v 1.114 2002/01/11 08:45:38 davem Exp $
* irq.c: UltraSparc IRQ handling/init/registry.
*
* Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
* Copyright (C) 1998 Eddie C. Dost (ecd@skynet.be)
* Copyright (C) 1998 Jakub Jelinek (jj@ultra.linux.cz)
*/
#include <linux/config.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/bootmem.h>
#include <asm/ptrace.h>
#include <asm/processor.h>
#include <asm/atomic.h>
#include <asm/system.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <asm/sbus.h>
#include <asm/iommu.h>
#include <asm/upa.h>
#include <asm/oplib.h>
#include <asm/timer.h>
#include <asm/smp.h>
#include <asm/starfire.h>
#include <asm/uaccess.h>
#include <asm/cache.h>
#include <asm/cpudata.h>
#include <asm/auxio.h>
#include <asm/head.h>
#ifdef CONFIG_SMP
static void distribute_irqs(void);
#endif
/* UPA nodes send interrupt packets to the UltraSparc with the low 5
 * bits (7 on Starfire) of the first data register holding the IRQ
 * identifier being delivered.  We must translate this into a
 * non-vector IRQ so we can set the softint on this cpu.
 *
 * To make processing these packets efficient and race-free we use
 * the array of irq buckets below.  The interrupt vector handler in
 * entry.S feeds incoming packets into per-cpu pil-indexed lists.
 * The IVEC handler does not need to act atomically; the PIL dispatch
 * code uses CAS to take an atomic snapshot of the list and clear it
 * at the same time (see the illustrative sketch below).
 */
struct ino_bucket ivector_table[NUM_IVECS] __attribute__ ((aligned (SMP_CACHE_BYTES)));
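
#if 0	/* Illustrative sketch only, never compiled. */
/* A minimal model of the snapshot-and-clear step described above,
 * assuming a plain singly-linked worklist.  The work_node layout and
 * the snapshot_and_clear() helper are hypothetical; the real per-cpu
 * lists built by entry.S chain ino_buckets, not these nodes.
 */
struct work_node {
	struct work_node *next;
};

static struct work_node *snapshot_and_clear(struct work_node **head)
{
	struct work_node *old;

	/* Retry until one CAS swaps the current head for NULL; after
	 * that the detached chain belongs solely to the caller, so no
	 * further locking is needed to walk it.
	 */
	do {
		old = *head;
	} while (cmpxchg(head, old, NULL) != old);

	return old;
}
#endif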
/* This has to be in the main kernel image, it cannot be
* turned into per-cpu data. The reason is that the main
* kernel image is locked into the TLB and this structure
* is accessed from the vectored interrupt trap handler. If
* access to this structure takes a TLB miss it could cause
* the 5-level sparc v9 trap stack to overflow.
*/
#define irq_work(__cpu) &(trap_block[(__cpu)].irq_worklist)
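
#if 0	/* Illustrative sketch only, never compiled. */
/* How a dispatcher might inspect the current cpu's worklist through
 * the irq_work() accessor above.  This assumes irq_worklist is a
 * single word naming the head of a bucket chain and that we run with
 * preemption off, e.g. in hard interrupt context.  worklist_pending()
 * is a hypothetical helper, shown only to make the accessor concrete.
 */
static int worklist_pending(void)
{
	unsigned int *list = irq_work(smp_processor_id());

	/* Non-zero means entry.S has queued at least one bucket. */
	return *list != 0;
}
#endif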
static struct irqaction *irq_action[NR_IRQS+1];
/* This only synchronizes entities which modify IRQ handler
* state and some selected user-level spots that want to
* read things in the table. IRQ handler processing orders
 * its accesses such that no locking is needed.
*/
static DEFINE_SPINLOCK(irq_action_lock);
static void register_irq_proc(unsigned int irq);
/*
 * The upper 16 bits of irqaction->flags hold the ino.
 * irqaction->mask holds the smp affinity information.
 */
#define put_ino_in_irqaction(action, irq) \
do {	(action)->flags &= 0xffffffffffffUL; \
	(action)->flags |= __irq_ino(irq) << 48; \
} while (0)
#define get_ino_in_irqaction(action) (action->flags >> 48)
#define put_smpaff_in_irqaction(action, smpaff) (action)->mask = (smpaff)
#define get_smpaff_in_irqaction(action) ((action)->mask)
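
#if 0	/* Illustrative sketch only, never compiled. */
/* Round-tripping the ino through irqaction->flags with the helpers
 * above.  The pack_check() wrapper and its BUG_ON are hypothetical,
 * shown only to make the 48-bit split concrete.
 */
static void pack_check(struct irqaction *action, unsigned int irq)
{
	put_ino_in_irqaction(action, irq);

	/* The low 48 flag bits are untouched; the ino reads back intact. */
	BUG_ON(get_ino_in_irqaction(action) != __irq_ino(irq));
}
#endif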
int show_interrupts(struct seq_file *p, void *v)
{
unsigned long flags;
int i = *(loff_t *) v;
struct irqaction *action;
#ifdef CONFIG_SMP
int j;
#endif
spin_lock_irqsave(&irq_action_lock, flags);
if (i <= NR_IRQS) {
if (!(action = irq_action[i]))
goto out_unlock;
seq_printf(p, "%3d: ", i);
#ifndef CONFIG_SMP
seq_printf(p, "%10u ", kstat_irqs(i));
#else
for_each_online_cpu(j) {
seq_printf(p, "%10u ",
kstat_cpu(j).irqs[i]);
}
#endif
seq_printf(p, " %s:%lx", action->name,
get_ino_in_irqaction(action));
for (action = action->next; action; action = action->next) {
seq_printf(p, ", %s:%lx", action->name,
get_ino_in_irqaction(action));
}
seq_putc(p, '\n');
}
out_unlock:
spin_unlock_irqrestore(&irq_action_lock, flags);
return 0;
}
extern unsigned long real_hard_smp_processor_id(void);
static unsigned int sun4u_compute_tid(unsigned long imap, unsigned long cpuid)
{
unsigned int tid;
if (this_is_starfire) {
tid = starfire_translate(imap, cpuid);
tid <<= IMAP_TID_SHIFT;
tid &= IMAP_TID_UPA;
} else {
if (tlb_type == cheetah || tlb_type == cheetah_plus) {
unsigned long ver;
__asm__ ("rdpr %%ver, %0"