Diffstat (limited to 'arch/tile/include/asm/irqflags.h')
-rw-r--r--  arch/tile/include/asm/irqflags.h | 181
1 file changed, 113 insertions, 68 deletions
diff --git a/arch/tile/include/asm/irqflags.h b/arch/tile/include/asm/irqflags.h
index 45cf67c2f28..71af5747874 100644
--- a/arch/tile/include/asm/irqflags.h
+++ b/arch/tile/include/asm/irqflags.h
@@ -24,12 +24,14 @@
  * the point of view of the generic Linux code.  Note that synchronous
  * interrupts (aka "non-queued") are not blocked by the mask in any case.
  */
-#if CHIP_HAS_AUX_PERF_COUNTERS()
 #define LINUX_MASKABLE_INTERRUPTS \
-	(~(INT_MASK(INT_PERF_COUNT) | INT_MASK(INT_AUX_PERF_COUNT)))
-#else
-#define LINUX_MASKABLE_INTERRUPTS \
-	(~(INT_MASK(INT_PERF_COUNT)))
+	(~((_AC(1,ULL) << INT_PERF_COUNT) | (_AC(1,ULL) << INT_AUX_PERF_COUNT)))
+
+#if CHIP_HAS_SPLIT_INTR_MASK()
+/* The same macro, but for the two 32-bit SPRs separately. */
+#define LINUX_MASKABLE_INTERRUPTS_LO (-1)
+#define LINUX_MASKABLE_INTERRUPTS_HI \
+	(~((1 << (INT_PERF_COUNT - 32)) | (1 << (INT_AUX_PERF_COUNT - 32))))
 #endif
 
 #ifndef __ASSEMBLY__
@@ -38,7 +40,15 @@
 #include <asm/percpu.h>
 #include <arch/spr_def.h>
 
-/* Set and clear kernel interrupt masks. */
+/*
+ * Set and clear kernel interrupt masks.
+ *
+ * NOTE: __insn_mtspr() is a compiler builtin marked as a memory
+ * clobber.  We rely on it being equivalent to a compiler barrier in
+ * this code since arch_local_irq_save() and friends must act as
+ * compiler barriers.  This compiler semantic is baked into enough
+ * places that the compiler will maintain it going forward.
+ */
 #if CHIP_HAS_SPLIT_INTR_MASK()
 #if INT_PERF_COUNT < 32 || INT_AUX_PERF_COUNT < 32 || INT_MEM_ERROR >= 32
 # error Fix assumptions about which word various interrupts are in
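A note on the constants above: the rewrite drops the INT_MASK() helper in favor of explicit shifts, and the new _LO/_HI pair expresses the same mask as two 32-bit words for chips whose interrupt-mask SPRs are split. Below is a standalone sketch of that arithmetic; the interrupt numbers are made up for illustration (not the real tile values), but the assumption that both perf counters land in the high word is exactly what the #error check in the header enforces.

#include <assert.h>
#include <stdint.h>

/* Illustrative interrupt numbers only -- not the real tile values.
 * Both perf counters are assumed to sit in the high word, which is
 * what the header's #error check guarantees. */
#define INT_PERF_COUNT		33
#define INT_AUX_PERF_COUNT	34

/* 64-bit form: everything maskable except the perf counters. */
#define LINUX_MASKABLE_INTERRUPTS \
	(~((1ULL << INT_PERF_COUNT) | (1ULL << INT_AUX_PERF_COUNT)))

/* Split form for the two 32-bit SPR halves. */
#define LINUX_MASKABLE_INTERRUPTS_LO 0xffffffffu
#define LINUX_MASKABLE_INTERRUPTS_HI \
	(~((1u << (INT_PERF_COUNT - 32)) | (1u << (INT_AUX_PERF_COUNT - 32))))

int main(void)
{
	uint64_t lo = LINUX_MASKABLE_INTERRUPTS_LO;
	uint64_t hi = LINUX_MASKABLE_INTERRUPTS_HI;

	/* The two 32-bit halves recombine into the 64-bit constant. */
	assert(((hi << 32) | lo) == LINUX_MASKABLE_INTERRUPTS);
	return 0;
}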
@@ -47,111 +57,140 @@
 	int __n = (n); \
 	int __mask = 1 << (__n & 0x1f); \
 	if (__n < 32) \
-		__insn_mtspr(SPR_INTERRUPT_MASK_SET_1_0, __mask); \
+		__insn_mtspr(SPR_INTERRUPT_MASK_SET_K_0, __mask); \
 	else \
-		__insn_mtspr(SPR_INTERRUPT_MASK_SET_1_1, __mask); \
+		__insn_mtspr(SPR_INTERRUPT_MASK_SET_K_1, __mask); \
 } while (0)
 #define interrupt_mask_reset(n) do { \
 	int __n = (n); \
 	int __mask = 1 << (__n & 0x1f); \
 	if (__n < 32) \
-		__insn_mtspr(SPR_INTERRUPT_MASK_RESET_1_0, __mask); \
+		__insn_mtspr(SPR_INTERRUPT_MASK_RESET_K_0, __mask); \
 	else \
-		__insn_mtspr(SPR_INTERRUPT_MASK_RESET_1_1, __mask); \
+		__insn_mtspr(SPR_INTERRUPT_MASK_RESET_K_1, __mask); \
 } while (0)
 #define interrupt_mask_check(n) ({ \
 	int __n = (n); \
 	(((__n < 32) ? \
-		__insn_mfspr(SPR_INTERRUPT_MASK_1_0) : \
-		__insn_mfspr(SPR_INTERRUPT_MASK_1_1)) \
+		__insn_mfspr(SPR_INTERRUPT_MASK_K_0) : \
+		__insn_mfspr(SPR_INTERRUPT_MASK_K_1)) \
 	  >> (__n & 0x1f)) & 1; \
 })
 #define interrupt_mask_set_mask(mask) do { \
 	unsigned long long __m = (mask); \
-	__insn_mtspr(SPR_INTERRUPT_MASK_SET_1_0, (unsigned long)(__m)); \
-	__insn_mtspr(SPR_INTERRUPT_MASK_SET_1_1, (unsigned long)(__m>>32)); \
+	__insn_mtspr(SPR_INTERRUPT_MASK_SET_K_0, (unsigned long)(__m)); \
+	__insn_mtspr(SPR_INTERRUPT_MASK_SET_K_1, (unsigned long)(__m>>32)); \
 } while (0)
 #define interrupt_mask_reset_mask(mask) do { \
 	unsigned long long __m = (mask); \
-	__insn_mtspr(SPR_INTERRUPT_MASK_RESET_1_0, (unsigned long)(__m)); \
-	__insn_mtspr(SPR_INTERRUPT_MASK_RESET_1_1, (unsigned long)(__m>>32)); \
+	__insn_mtspr(SPR_INTERRUPT_MASK_RESET_K_0, (unsigned long)(__m)); \
+	__insn_mtspr(SPR_INTERRUPT_MASK_RESET_K_1, (unsigned long)(__m>>32)); \
+} while (0)
+#define interrupt_mask_save_mask() \
+	(__insn_mfspr(SPR_INTERRUPT_MASK_SET_K_0) | \
+	 (((unsigned long long)__insn_mfspr(SPR_INTERRUPT_MASK_SET_K_1))<<32))
+#define interrupt_mask_restore_mask(mask) do { \
+	unsigned long long __m = (mask); \
+	__insn_mtspr(SPR_INTERRUPT_MASK_K_0, (unsigned long)(__m)); \
+	__insn_mtspr(SPR_INTERRUPT_MASK_K_1, (unsigned long)(__m>>32)); \
 } while (0)
 #else
 #define interrupt_mask_set(n) \
-	__insn_mtspr(SPR_INTERRUPT_MASK_SET_1, (1UL << (n)))
+	__insn_mtspr(SPR_INTERRUPT_MASK_SET_K, (1UL << (n)))
 #define interrupt_mask_reset(n) \
-	__insn_mtspr(SPR_INTERRUPT_MASK_RESET_1, (1UL << (n)))
+	__insn_mtspr(SPR_INTERRUPT_MASK_RESET_K, (1UL << (n)))
 #define interrupt_mask_check(n) \
-	((__insn_mfspr(SPR_INTERRUPT_MASK_1) >> (n)) & 1)
+	((__insn_mfspr(SPR_INTERRUPT_MASK_K) >> (n)) & 1)
 #define interrupt_mask_set_mask(mask) \
-	__insn_mtspr(SPR_INTERRUPT_MASK_SET_1, (mask))
+	__insn_mtspr(SPR_INTERRUPT_MASK_SET_K, (mask))
 #define interrupt_mask_reset_mask(mask) \
-	__insn_mtspr(SPR_INTERRUPT_MASK_RESET_1, (mask))
+	__insn_mtspr(SPR_INTERRUPT_MASK_RESET_K, (mask))
+#define interrupt_mask_save_mask() \
+	__insn_mfspr(SPR_INTERRUPT_MASK_K)
+#define interrupt_mask_restore_mask(mask) \
+	__insn_mtspr(SPR_INTERRUPT_MASK_K, (mask))
 #endif
 
 /*
  * The set of interrupts we want active if irqs are enabled.
  * Note that in particular, the tile timer interrupt comes and goes
  * from this set, since we have no other way to turn off the timer.
- * Likewise, INTCTRL_1 is removed and re-added during device
+ * Likewise, INTCTRL_K is removed and re-added during device
  * interrupts, as is the hardwall UDN_FIREWALL interrupt.
  * We use a low bit (MEM_ERROR) as our sentinel value and make sure it
  * is always claimed as an "active interrupt" so we can query that bit
  * to know our current state.
  */
 DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask);
-#define INITIAL_INTERRUPTS_ENABLED INT_MASK(INT_MEM_ERROR)
+#define INITIAL_INTERRUPTS_ENABLED (1ULL << INT_MEM_ERROR)
+
+#ifdef CONFIG_DEBUG_PREEMPT
+/* Due to inclusion issues, we can't rely on <linux/smp.h> here. */
+extern unsigned int debug_smp_processor_id(void);
+# define smp_processor_id() debug_smp_processor_id()
+#endif
 
 /* Disable interrupts. */
-#define raw_local_irq_disable() \
+#define arch_local_irq_disable() \
 	interrupt_mask_set_mask(LINUX_MASKABLE_INTERRUPTS)
 
 /* Disable all interrupts, including NMIs. */
-#define raw_local_irq_disable_all() \
-	interrupt_mask_set_mask(-1UL)
+#define arch_local_irq_disable_all() \
+	interrupt_mask_set_mask(-1ULL)
+
+/*
+ * Read the set of maskable interrupts.
+ * We avoid the preemption warning here via __this_cpu_ptr since even
+ * if irqs are already enabled, it's harmless to read the wrong cpu's
+ * enabled mask.
+ */
+#define arch_local_irqs_enabled() \
+	(*__this_cpu_ptr(&interrupts_enabled_mask))
 
 /* Re-enable all maskable interrupts. */
-#define raw_local_irq_enable() \
-	interrupt_mask_reset_mask(__get_cpu_var(interrupts_enabled_mask))
+#define arch_local_irq_enable() \
+	interrupt_mask_reset_mask(arch_local_irqs_enabled())
 
 /* Disable or enable interrupts based on flag argument. */
-#define raw_local_irq_restore(disabled) do { \
+#define arch_local_irq_restore(disabled) do { \
 	if (disabled) \
-		raw_local_irq_disable(); \
+		arch_local_irq_disable(); \
 	else \
-		raw_local_irq_enable(); \
+		arch_local_irq_enable(); \
 } while (0)
 
 /* Return true if "flags" argument means interrupts are disabled. */
-#define raw_irqs_disabled_flags(flags) ((flags) != 0)
+#define arch_irqs_disabled_flags(flags) ((flags) != 0)
 
 /* Return true if interrupts are currently disabled. */
-#define raw_irqs_disabled() interrupt_mask_check(INT_MEM_ERROR)
+#define arch_irqs_disabled() interrupt_mask_check(INT_MEM_ERROR)
 
 /* Save whether interrupts are currently disabled. */
-#define raw_local_save_flags(flags) ((flags) = raw_irqs_disabled())
+#define arch_local_save_flags() arch_irqs_disabled()
 
 /* Save whether interrupts are currently disabled, then disable them. */
-#define raw_local_irq_save(flags) \
-	do { raw_local_save_flags(flags); raw_local_irq_disable(); } while (0)
+#define arch_local_irq_save() ({ \
+	unsigned long __flags = arch_local_save_flags(); \
+	arch_local_irq_disable(); \
+	__flags; })
 
 /* Prevent the given interrupt from being enabled next time we enable irqs. */
-#define raw_local_irq_mask(interrupt) \
-	(__get_cpu_var(interrupts_enabled_mask) &= ~INT_MASK(interrupt))
+#define arch_local_irq_mask(interrupt) \
+	this_cpu_and(interrupts_enabled_mask, ~(1ULL << (interrupt)))
 
 /* Prevent the given interrupt from being enabled immediately. */
-#define raw_local_irq_mask_now(interrupt) do { \
-	raw_local_irq_mask(interrupt); \
+#define arch_local_irq_mask_now(interrupt) do { \
+	arch_local_irq_mask(interrupt); \
 	interrupt_mask_set(interrupt); \
 } while (0)
 
 /* Allow the given interrupt to be enabled next time we enable irqs. */
-#define raw_local_irq_unmask(interrupt) \
-	(__get_cpu_var(interrupts_enabled_mask) |= INT_MASK(interrupt))
+#define arch_local_irq_unmask(interrupt) \
+	this_cpu_or(interrupts_enabled_mask, (1ULL << (interrupt)))
 
 /* Allow the given interrupt to be enabled immediately, if !irqs_disabled. */
-#define raw_local_irq_unmask_now(interrupt) do { \
-	raw_local_irq_unmask(interrupt); \
+#define arch_local_irq_unmask_now(interrupt) do { \
+	arch_local_irq_unmask(interrupt); \
 	if (!irqs_disabled()) \
 		interrupt_mask_reset(interrupt); \
 } while (0)
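The flags scheme in this hunk is worth spelling out: INT_MEM_ERROR is kept permanently in the enabled set, so that one bit of the hardware mask acts as a sentinel for "irqs disabled", and arch_local_save_flags() reduces to reading it. The model below mimics that with an ordinary variable standing in for the SPR; it is a host-side sketch, not kernel code, and the helper names are invented. The real restore only resets the per-cpu enabled set rather than the whole mask.

#include <assert.h>
#include <stdint.h>

#define INT_MEM_ERROR 0			/* sentinel bit, as in the header */
static uint64_t interrupt_mask;		/* stands in for SPR_INTERRUPT_MASK_K */

/* Models arch_irqs_disabled(): test the sentinel bit of the mask. */
static int irqs_disabled(void)
{
	return (interrupt_mask >> INT_MEM_ERROR) & 1;
}

/* Models arch_local_irq_save(): capture state, then mask everything. */
static unsigned long irq_save(void)
{
	unsigned long flags = irqs_disabled();
	interrupt_mask = ~0ULL;
	return flags;
}

/* Models arch_local_irq_restore(): a zero flag re-enables. */
static void irq_restore(unsigned long disabled)
{
	if (disabled)
		interrupt_mask = ~0ULL;
	else
		interrupt_mask = 0;	/* simplified: real code resets only the enabled set */
}

int main(void)
{
	unsigned long outer = irq_save();	/* were enabled: outer == 0 */
	unsigned long inner = irq_save();	/* now disabled: inner == 1 */
	irq_restore(inner);			/* inner restore keeps irqs off */
	assert(irqs_disabled());
	irq_restore(outer);			/* outer restore turns them back on */
	assert(!irqs_disabled());
	return 0;
}

The point of the sentinel is that save/restore pairs nest correctly without any per-cpu nesting counter: each restore simply reinstates whatever the matching save observed.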
@@ -163,19 +202,19 @@ DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask);
 #ifdef __tilegx__
 
 #if INT_MEM_ERROR != 0
-# error Fix IRQ_DISABLED() macro
+# error Fix IRQS_DISABLED() macro
 #endif
 
 /* Return 0 or 1 to indicate whether interrupts are currently disabled. */
 #define IRQS_DISABLED(tmp)					\
-	mfspr   tmp, INTERRUPT_MASK_1;				\
+	mfspr   tmp, SPR_INTERRUPT_MASK_K;			\
	andi    tmp, tmp, 1
 
 /* Load up a pointer to &interrupts_enabled_mask. */
 #define GET_INTERRUPTS_ENABLED_MASK_PTR(reg)			\
-	moveli reg, hw2_last(interrupts_enabled_mask);	\
-	shl16insli reg, reg, hw1(interrupts_enabled_mask);\
-	shl16insli reg, reg, hw0(interrupts_enabled_mask);\
+	moveli reg, hw2_last(interrupts_enabled_mask);		\
+	shl16insli reg, reg, hw1(interrupts_enabled_mask);	\
+	shl16insli reg, reg, hw0(interrupts_enabled_mask);	\
 	add     reg, reg, tp
 
 /* Disable interrupts. */
@@ -183,18 +222,19 @@ DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask);
 	moveli  tmp0, hw2_last(LINUX_MASKABLE_INTERRUPTS);	\
 	shl16insli tmp0, tmp0, hw1(LINUX_MASKABLE_INTERRUPTS);	\
 	shl16insli tmp0, tmp0, hw0(LINUX_MASKABLE_INTERRUPTS);	\
-	mtspr   INTERRUPT_MASK_SET_1, tmp0
+	mtspr   SPR_INTERRUPT_MASK_SET_K, tmp0
 
 /* Disable ALL synchronous interrupts (used by NMI entry). */
 #define IRQ_DISABLE_ALL(tmp)					\
 	movei   tmp, -1;					\
-	mtspr   INTERRUPT_MASK_SET_1, tmp
+	mtspr   SPR_INTERRUPT_MASK_SET_K, tmp
 
 /* Enable interrupts. */
-#define IRQ_ENABLE(tmp0, tmp1)					\
+#define IRQ_ENABLE_LOAD(tmp0, tmp1)				\
 	GET_INTERRUPTS_ENABLED_MASK_PTR(tmp0);			\
-	ld      tmp0, tmp0;					\
-	mtspr   INTERRUPT_MASK_RESET_1, tmp0
+	ld      tmp0, tmp0
+#define IRQ_ENABLE_APPLY(tmp0, tmp1)				\
+	mtspr   SPR_INTERRUPT_MASK_RESET_K, tmp0
 
 #else /* !__tilegx__ */
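The moveli/shl16insli pairs above materialize a 64-bit constant 16 bits at a time: moveli loads the hw2_last chunk sign-extended, and each shl16insli shifts left by 16 and inserts the next chunk. The sequence is only valid when bits 63:48 of the constant are the sign extension of bit 47, which holds for these masks. A C sketch of the same computation, under that assumption:

#include <assert.h>
#include <stdint.h>

/* Mirrors "moveli r, hw2_last(c); shl16insli r, r, hw1(c);
 * shl16insli r, r, hw0(c)".  Valid only when bits 63:48 of c are the
 * sign extension of bit 47, which is what hw2_last relies on. */
static uint64_t build_const(uint64_t c)
{
	uint64_t r = (uint64_t)(int64_t)(int16_t)(c >> 32); /* moveli, sign-extended */
	r = (r << 16) | ((c >> 16) & 0xffff);	/* shl16insli with hw1 */
	r = (r << 16) | (c & 0xffff);		/* shl16insli with hw0 */
	return r;
}

int main(void)
{
	/* e.g. ~(bits 33 and 34): the shape of LINUX_MASKABLE_INTERRUPTS */
	uint64_t mask = ~((1ULL << 33) | (1ULL << 34));
	assert(build_const(mask) == mask);
	return 0;
}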
@@ -208,46 +248,51 @@ DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask);
  * (making the original code's write of the "high" mask word idempotent).
  */
 #define IRQS_DISABLED(tmp)					\
-	mfspr   tmp, INTERRUPT_MASK_1_0;			\
+	mfspr   tmp, SPR_INTERRUPT_MASK_K_0;			\
	shri    tmp, tmp, INT_MEM_ERROR;			\
 	andi    tmp, tmp, 1
 
 /* Load up a pointer to &interrupts_enabled_mask. */
 #define GET_INTERRUPTS_ENABLED_MASK_PTR(reg)			\
-	moveli  reg, lo16(interrupts_enabled_mask);	\
-	auli    reg, reg, ha16(interrupts_enabled_mask);\
+	moveli  reg, lo16(interrupts_enabled_mask);		\
+	auli    reg, reg, ha16(interrupts_enabled_mask);	\
 	add     reg, reg, tp
 
 /* Disable interrupts. */
 #define IRQ_DISABLE(tmp0, tmp1)					\
 	{							\
-	 movei  tmp0, -1;					\
-	 moveli tmp1, lo16(LINUX_MASKABLE_INTERRUPTS)		\
+	 movei  tmp0, LINUX_MASKABLE_INTERRUPTS_LO;		\
+	 moveli tmp1, lo16(LINUX_MASKABLE_INTERRUPTS_HI)	\
 	};							\
 	{							\
-	 mtspr  INTERRUPT_MASK_SET_1_0, tmp0;			\
-	 auli   tmp1, tmp1, ha16(LINUX_MASKABLE_INTERRUPTS)	\
+	 mtspr  SPR_INTERRUPT_MASK_SET_K_0, tmp0;		\
+	 auli   tmp1, tmp1, ha16(LINUX_MASKABLE_INTERRUPTS_HI)	\
 	};							\
-	mtspr   INTERRUPT_MASK_SET_1_1, tmp1
+	mtspr   SPR_INTERRUPT_MASK_SET_K_1, tmp1
 
 /* Disable ALL synchronous interrupts (used by NMI entry). */
 #define IRQ_DISABLE_ALL(tmp)					\
 	movei   tmp, -1;					\
-	mtspr   INTERRUPT_MASK_SET_1_0, tmp;			\
-	mtspr   INTERRUPT_MASK_SET_1_1, tmp
+	mtspr   SPR_INTERRUPT_MASK_SET_K_0, tmp;		\
+	mtspr   SPR_INTERRUPT_MASK_SET_K_1, tmp
 
 /* Enable interrupts. */
-#define IRQ_ENABLE(tmp0, tmp1)					\
+#define IRQ_ENABLE_LOAD(tmp0, tmp1)				\
 	GET_INTERRUPTS_ENABLED_MASK_PTR(tmp0);			\
 	{							\
	 lw     tmp0, tmp0;					\
 	 addi   tmp1, tmp0, 4					\
 	};							\
-	lw      tmp1, tmp1;					\
-	mtspr   INTERRUPT_MASK_RESET_1_0, tmp0;			\
-	mtspr   INTERRUPT_MASK_RESET_1_1, tmp1
+	lw      tmp1, tmp1
+#define IRQ_ENABLE_APPLY(tmp0, tmp1)				\
+	mtspr   SPR_INTERRUPT_MASK_RESET_K_0, tmp0;		\
+	mtspr   SPR_INTERRUPT_MASK_RESET_K_1, tmp1
 
 #endif
 
+#define IRQ_ENABLE(tmp0, tmp1)					\
+	IRQ_ENABLE_LOAD(tmp0, tmp1);				\
+	IRQ_ENABLE_APPLY(tmp0, tmp1)
+
 /*
  * Do the CPU's IRQ-state tracing from assembly code.  We call a
  * C function, but almost everywhere we do, we don't mind clobbering
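On tilepro the enabled mask is 64 bits wide but loads are 32-bit, so IRQ_ENABLE_LOAD fetches the per-cpu variable as two words at byte offsets 0 and 4 (low word first, matching the offset-4 load of the high half), and IRQ_ENABLE_APPLY writes them to the two RESET SPRs. The C rendering below shows the same access pattern; spr_write() and the SPR numbers are hypothetical stand-ins, and the motivation for the LOAD/APPLY split is presumably to let assembly callers hoist the memory loads away from the SPR writes.

#include <stdint.h>

#define SPR_INTERRUPT_MASK_RESET_K_0 0	/* hypothetical SPR numbers */
#define SPR_INTERRUPT_MASK_RESET_K_1 1

/* Hypothetical stand-in for __insn_mtspr(). */
static void spr_write(int spr, uint32_t val)
{
	(void)spr;
	(void)val;
}

static uint64_t interrupts_enabled_mask;	/* per-cpu in the real code */

/* IRQ_ENABLE_LOAD: lw tmp0, ptr; lw tmp1, ptr+4 */
static void irq_enable_load(uint32_t *lo, uint32_t *hi)
{
	*lo = (uint32_t)interrupts_enabled_mask;		/* interrupts 0..31 */
	*hi = (uint32_t)(interrupts_enabled_mask >> 32);	/* interrupts 32..63 */
}

/* IRQ_ENABLE_APPLY: unmask both words in the hardware. */
static void irq_enable_apply(uint32_t lo, uint32_t hi)
{
	spr_write(SPR_INTERRUPT_MASK_RESET_K_0, lo);
	spr_write(SPR_INTERRUPT_MASK_RESET_K_1, hi);
}

/* IRQ_ENABLE is just the two halves back to back, exactly as the
 * composite macro at the end of the diff spells it. */
static void irq_enable(void)
{
	uint32_t lo, hi;
	irq_enable_load(&lo, &hi);
	irq_enable_apply(lo, hi);
}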
