Diffstat (limited to 'arch/parisc/include/asm/hardirq.h')
-rw-r--r--	arch/parisc/include/asm/hardirq.h	41
1 file changed, 29 insertions(+), 12 deletions(-)
diff --git a/arch/parisc/include/asm/hardirq.h b/arch/parisc/include/asm/hardirq.h
index ce93133d511..9b3bd039a60 100644
--- a/arch/parisc/include/asm/hardirq.h
+++ b/arch/parisc/include/asm/hardirq.h
@@ -1,29 +1,46 @@
 /* hardirq.h: PA-RISC hard IRQ support.
  *
  * Copyright (C) 2001 Matthew Wilcox <matthew@wil.cx>
- *
- * The locking is really quite interesting.  There's a cpu-local
- * count of how many interrupts are being handled, and a global
- * lock.  An interrupt can only be serviced if the global lock
- * is free.  You can't be sure no more interrupts are being
- * serviced until you've acquired the lock and then checked
- * all the per-cpu interrupt counts are all zero.  It's a specialised
- * br_lock, and that's exactly how Sparc does it.  We don't because
- * it's more locking for us.  This way is lock-free in the interrupt path.
+ * Copyright (C) 2013 Helge Deller <deller@gmx.de>
  */
 
 #ifndef _PARISC_HARDIRQ_H
 #define _PARISC_HARDIRQ_H
 
+#include <linux/cache.h>
 #include <linux/threads.h>
 #include <linux/irq.h>
 
+#ifdef CONFIG_IRQSTACKS
+#define __ARCH_HAS_DO_SOFTIRQ
+#endif
+
 typedef struct {
-	unsigned long __softirq_pending; /* set_bit is used on this */
+	unsigned int __softirq_pending;
+	unsigned int kernel_stack_usage;
+	unsigned int irq_stack_usage;
+#ifdef CONFIG_SMP
+	unsigned int irq_resched_count;
+#endif
+	unsigned int irq_unaligned_count;
+	unsigned int irq_fpassist_count;
+	unsigned int irq_tlb_count;
 } ____cacheline_aligned irq_cpustat_t;
 
-#include <linux/irq_cpustat.h>	/* Standard mappings for irq_cpustat_t above */
+DECLARE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
+
+#define __ARCH_IRQ_STAT
+#define __IRQ_STAT(cpu, member)	(irq_stat[cpu].member)
+#define inc_irq_stat(member)	this_cpu_inc(irq_stat.member)
+#define __inc_irq_stat(member)	__this_cpu_inc(irq_stat.member)
+#define local_softirq_pending()	this_cpu_read(irq_stat.__softirq_pending)
+
+#define __ARCH_SET_SOFTIRQ_PENDING
+
+#define set_softirq_pending(x)	\
+		this_cpu_write(irq_stat.__softirq_pending, (x))
+#define or_softirq_pending(x)	this_cpu_or(irq_stat.__softirq_pending, (x))
 
-void ack_bad_irq(unsigned int irq);
+#define ack_bad_irq(irq) WARN(1, "unexpected IRQ trap at vector %02x\n", irq)
 
 #endif /* _PARISC_HARDIRQ_H */
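
For context on how the per-CPU statistics introduced here are meant to be used, below is a minimal sketch, not part of this patch: the function names example_count_tlb_flush_irq and example_show_tlb_counts are hypothetical, while DEFINE_PER_CPU_SHARED_ALIGNED, inc_irq_stat, per_cpu, for_each_online_cpu, and seq_printf are existing kernel interfaces. The actual definition of irq_stat and its readers live in arch/parisc/kernel/, not in this header.

/* Illustrative sketch only -- not from this commit. */
#include <linux/percpu.h>
#include <linux/seq_file.h>
#include <linux/cpumask.h>
#include <asm/hardirq.h>

/* One definition in arch code (e.g. irq.c) backs the
 * DECLARE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat) above. */
DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);

/* Interrupt path: inc_irq_stat(m) expands to this_cpu_inc(irq_stat.m),
 * a lock-free increment of the current CPU's private counter. */
static void example_count_tlb_flush_irq(void)
{
	inc_irq_stat(irq_tlb_count);
}

/* Reporting path (e.g. /proc/interrupts): each CPU's copy is read
 * with the generic per_cpu() accessor. */
static void example_show_tlb_counts(struct seq_file *p)
{
	int cpu;

	for_each_online_cpu(cpu)
		seq_printf(p, " %10u", per_cpu(irq_stat, cpu).irq_tlb_count);
	seq_putc(p, '\n');
}

The combination of ____cacheline_aligned on the struct and the shared-aligned per-CPU declaration keeps each CPU's counters on their own cache line, so the hot interrupt path increments a counter without atomics or cross-CPU cache-line bouncing.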
