Diffstat (limited to 'arch/blackfin/mach-common/interrupt.S')
-rw-r--r--	arch/blackfin/mach-common/interrupt.S	297
1 files changed, 185 insertions, 112 deletions
diff --git a/arch/blackfin/mach-common/interrupt.S b/arch/blackfin/mach-common/interrupt.S
index dd45664f0d0..469ce7282dc 100644
--- a/arch/blackfin/mach-common/interrupt.S
+++ b/arch/blackfin/mach-common/interrupt.S
@@ -1,41 +1,25 @@
 /*
- * File:         arch/blackfin/mach-common/interrupt.S
- * Based on:
- * Author:       D. Jeff Dionne <jeff@ryeham.ee.ryerson.ca>
- *               Kenneth Albanowski <kjahds@kjahds.com>
- *
- * Created:      ?
- * Description:  Interrupt Entries
- *
- * Modified:
- *               Copyright 2004-2006 Analog Devices Inc.
- *
- * Bugs:         Enter bugs at http://blackfin.uclinux.org/
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
+ * Interrupt Entries
  *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
+ * Copyright 2005-2009 Analog Devices Inc.
+ *               D. Jeff Dionne <jeff@ryeham.ee.ryerson.ca>
+ *               Kenneth Albanowski <kjahds@kjahds.com>
  *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see the file COPYING, or write
- * to the Free Software Foundation, Inc.,
- * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301  USA
+ * Licensed under the GPL-2 or later.
  */
 
 #include <asm/blackfin.h>
-#include <asm/mach/irq.h>
-#include <linux/autoconf.h>
+#include <mach/irq.h>
 #include <linux/linkage.h>
 #include <asm/entry.h>
 #include <asm/asm-offsets.h>
+#include <asm/trace.h>
+#include <asm/traps.h>
+#include <asm/thread_info.h>
+
+#include <asm/context.S>
 
-#include <asm/mach-common/context.S>
+.extern _ret_from_exception
 
 #ifdef CONFIG_I_ENTRY_L1
 .section .l1.text
@@ -45,29 +29,6 @@
 
 .align 4	/* just in case */
 
-/*
- * initial interrupt handlers
- */
-
-#ifndef CONFIG_KGDB
-	/* interrupt routine for emulation - 0 */
-	/* Currently used only if GDB stub is not in - invalid */
-	/* gdb-stub set the evt itself */
-	/* save registers for post-mortem only */
-ENTRY(_evt_emulation)
-	SAVE_ALL_SYS
-#ifdef CONFIG_FRAME_POINTER
-	fp = 0;
-#endif
-	r0 = IRQ_EMU;
-	r1 = sp;
-	SP += -12;
-	call _irq_panic;
-	SP += 12;
-	/* - GDB stub fills this in by itself (if defined) */
-	rte;
-#endif
-
 /* Common interrupt entry code.  First we do CLI, then push
  * RETI, to keep interrupts disabled, but to allow this state to be changed
  * by local_bh_enable.
@@ -127,6 +88,13 @@ __common_int_entry:
 #else
 	cli r1;
 #endif
+#ifdef CONFIG_TRACE_IRQFLAGS
+	[--sp] = r0;
+	sp += -12;
+	call _trace_hardirqs_off;
+	sp += 12;
+	r0 = [sp++];
+#endif
 	[--sp] = RETI;  /* orig_pc */
 	/* Clear all L registers.  */
 	r1 = 0 (x);
@@ -138,89 +106,135 @@ __common_int_entry:
 	fp = 0;
 #endif
-#ifdef ANOMALY_05000283
-	cc = r7 == r7;
-	p5.h = 0xffc0;
-	p5.l = 0x0014;
-	if cc jump 1f;
-	r7.l = W[p5];
-1:
-#endif
+	ANOMALY_283_315_WORKAROUND(p5, r7)
+
 	r1 = sp;
 	SP += -12;
-	call _do_irq;
+#ifdef CONFIG_IPIPE
+	call ___ipipe_grab_irq
 	SP += 12;
-	call _return_from_int;
+	cc = r0 == 0;
+	if cc jump .Lcommon_restore_context;
+#else /* CONFIG_IPIPE */
+
+#ifdef CONFIG_PREEMPT
+	r7 = sp;
+	r4.l = lo(ALIGN_PAGE_MASK);
+	r4.h = hi(ALIGN_PAGE_MASK);
+	r7 = r7 & r4;
+	p5 = r7;
+	r7 = [p5 + TI_PREEMPT]; /* get preempt count */
+	r7 += 1;                /* increment it */
+	[p5 + TI_PREEMPT] = r7;
+#endif
+	pseudo_long_call _do_irq, p2;
+
+#ifdef CONFIG_PREEMPT
+	r7 += -1;
+	[p5 + TI_PREEMPT] = r7; /* restore preempt count */
+#endif
+
+	SP += 12;
+#endif /* CONFIG_IPIPE */
+	pseudo_long_call _return_from_int, p2;
 .Lcommon_restore_context:
 	RESTORE_CONTEXT
 	rti;
 
 /* interrupt routine for ivhw - 5 */
 ENTRY(_evt_ivhw)
-	SAVE_CONTEXT
+	/* In case a single action kicks off multiple memory transactions, (like
+	 * a cache line fetch, - this can cause multiple hardware errors, let's
+	 * catch them all. First - make sure all the actions are complete, and
+	 * the core sees the hardware errors.
+	 */
+	SSYNC;
+	SSYNC;
+
+	SAVE_ALL_SYS
 #ifdef CONFIG_FRAME_POINTER
 	fp = 0;
 #endif
-#ifdef ANOMALY_05000283
-	cc = r7 == r7;
-	p5.h = 0xffc0;
-	p5.l = 0x0014;
-	if cc jump 1f;
-	r7.l = W[p5];
+
+	ANOMALY_283_315_WORKAROUND(p5, r7)
+
+	/* Handle all stacked hardware errors
+	 * To make sure we don't hang forever, only do it 10 times
+	 */
+	R0 = 0;
+	R2 = 10;
 1:
-#endif
-	p0.l = lo(TBUFCTL);
-	p0.h = hi(TBUFCTL);
-	r0 = 1;
-	[p0] = r0;
-	r0 = IRQ_HWERR;
-	r1 = sp;
-
-#ifdef CONFIG_HARDWARE_PM
-	r7 = SEQSTAT;
-	r7 = r7 >>> 0xe;
-	r6 = 0x1F;
-	r7 = r7 & r6;
-	r5 = 0x12;
-	cc = r7 == r5;
-	if cc jump .Lcall_do_ovf;	/* deal with performance counter overflow */
-#endif
+	P0.L = LO(ILAT);
+	P0.H = HI(ILAT);
+	R1 = [P0];
+	CC = BITTST(R1, EVT_IVHW_P);
+	IF ! CC JUMP 2f;
+	/* OK a hardware error is pending - clear it */
+	R1 = EVT_IVHW_P;
+	[P0] = R1;
+	R0 += 1;
+	CC = R1 == R2;
+	if CC JUMP 2f;
+	JUMP 1b;
+2:
+	# We are going to dump something out, so make sure we print IPEND properly
+	p2.l = lo(IPEND);
+	p2.h = hi(IPEND);
+	r0 = [p2];
+	[sp + PT_IPEND] = r0;
 
-	SP += -12;
-	call _irq_panic;
-	SP += 12;
-	rti;
-#ifdef CONFIG_HARDWARE_PM
-.Lcall_do_ovf:
+	/* set the EXCAUSE to HWERR for trap_c */
+	r0 = [sp + PT_SEQSTAT];
+	R1.L = LO(VEC_HWERR);
+	R1.H = HI(VEC_HWERR);
+	R0 = R0 | R1;
+	[sp + PT_SEQSTAT] = R0;
 
+	r0 = sp;	/* stack frame pt_regs pointer argument ==> r0 */
 	SP += -12;
-	call _pm_overflow;
+	pseudo_long_call _trap_c, p5;
 	SP += 12;
 
-	jump .Lcommon_restore_context;
+#ifdef EBIU_ERRMST
+	/* make sure EBIU_ERRMST is clear */
+	p0.l = LO(EBIU_ERRMST);
+	p0.h = HI(EBIU_ERRMST);
+	r0.l = (CORE_ERROR | CORE_MERROR);
+	w[p0] = r0.l;
 #endif
 
-/* interrupt routine for evt2 - 2.  This is NMI.  */
-ENTRY(_evt_evt2)
-	SAVE_CONTEXT
-#ifdef CONFIG_FRAME_POINTER
-	fp = 0;
-#endif
-#ifdef ANOMALY_05000283
-	cc = r7 == r7;
-	p5.h = 0xffc0;
-	p5.l = 0x0014;
-	if cc jump 1f;
-	r7.l = W[p5];
-1:
-#endif
-	r0 = IRQ_NMI;
-	r1 = sp;
+	pseudo_long_call _ret_from_exception, p2;
+
+.Lcommon_restore_all_sys:
+	RESTORE_ALL_SYS
+	rti;
+ENDPROC(_evt_ivhw)
+
+/* Interrupt routine for evt2 (NMI).
+ * For inner circle type details, please see:
+ *   http://docs.blackfin.uclinux.org/doku.php?id=linux-kernel:nmi
+ */
+ENTRY(_evt_nmi)
+#ifndef CONFIG_NMI_WATCHDOG
+.weak _evt_nmi
+#else
+	/* Not take account of CPLBs, this handler will not return */
+	SAVE_ALL_SYS
+	r0 = sp;
+	r1 = retn;
+	[sp + PT_PC] = r1;
+	trace_buffer_save(p4,r5);
+
+	ANOMALY_283_315_WORKAROUND(p4, r5)
+
 	SP += -12;
-	call _asm_do_IRQ;
+	call _do_nmi;
 	SP += 12;
-	RESTORE_CONTEXT
+1:
+	jump 1b;
+#endif
 	rtn;
+ENDPROC(_evt_nmi)
 
 /* interrupt routine for core timer - 6 */
 ENTRY(_evt_timer)
@@ -249,5 +263,64 @@ ENTRY(_evt_system_call)
 #ifdef CONFIG_FRAME_POINTER
 	fp = 0;
 #endif
-	call _system_call;
+	pseudo_long_call _system_call, p2;
 	jump .Lcommon_restore_context;
+ENDPROC(_evt_system_call)
+
+#ifdef CONFIG_IPIPE
+/*
+ * __ipipe_call_irqtail: lowers the current priority level to EVT15
+ * before running a user-defined routine, then raises the priority
+ * level to EVT14 to prepare the caller for a normal interrupt
+ * return through RTI.
+ *
+ * We currently use this feature in two occasions:
+ *
+ * - before branching to __ipipe_irq_tail_hook as requested by a high
+ *   priority domain after the pipeline delivered an interrupt,
+ *   e.g. such as Xenomai, in order to start its rescheduling
+ *   procedure, since we may not switch tasks when IRQ levels are
+ *   nested on the Blackfin, so we have to fake an interrupt return
+ *   so that we may reschedule immediately.
+ *
+ * - before branching to __ipipe_sync_root(), in order to play any interrupt
+ *   pending for the root domain (i.e. the Linux kernel). This lowers
+ *   the core priority level enough so that Linux IRQ handlers may
+ *   never delay interrupts handled by high priority domains; we defer
+ *   those handlers until this point instead. This is a substitute
+ *   to using a threaded interrupt model for the Linux kernel.
+ *
+ * r0: address of user-defined routine
+ * context: caller must have preempted EVT15, hw interrupts must be off.
+ */
+ENTRY(___ipipe_call_irqtail)
+	p0 = r0;
+	r0.l = 1f;
+	r0.h = 1f;
+	reti = r0;
+	rti;
+1:
+	[--sp] = rets;
+	[--sp] = ( r7:4, p5:3 );
+	sp += -12;
+	call (p0);
+	sp += 12;
+	( r7:4, p5:3 ) = [sp++];
+	rets = [sp++];
+
+#ifdef CONFIG_DEBUG_HWERR
+	/* enable irq14 & hwerr interrupt, until we transition to _evt_evt14 */
+	r0 = (EVT_IVG14 | EVT_IVHW | \
+	      EVT_IRPTEN | EVT_EVX | EVT_NMI | EVT_RST | EVT_EMU);
+#else
+	/* Only enable irq14 interrupt, until we transition to _evt_evt14 */
+	r0 = (EVT_IVG14 | \
+	      EVT_IRPTEN | EVT_EVX | EVT_NMI | EVT_RST | EVT_EMU);
+#endif
+	sti r0;
+	raise 14;	/* Branches to _evt_evt14 */
+2:
+	jump 2b;	/* Likely paranoid. */
+ENDPROC(___ipipe_call_irqtail)
+
+#endif /* CONFIG_IPIPE */
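For readers skimming the CONFIG_PREEMPT hunk in __common_int_entry above: the assembly locates thread_info at the bottom of the aligned kernel stack, bumps its preempt count before dispatching the interrupt, and drops it again afterwards. A minimal C sketch of that idea follows, assuming an 8 KiB kernel stack; ALIGN_PAGE_MASK_GUESS, bfin_thread_info() and dispatch_irq() are illustrative stand-ins invented for this sketch, not kernel APIs.

/*
 * Illustrative C paraphrase of the CONFIG_PREEMPT hunk in __common_int_entry
 * above -- not kernel code.
 */
#define ALIGN_PAGE_MASK_GUESS (~(0x2000UL - 1))	/* assumes an 8 KiB kernel stack */

struct thread_info {
	int preempt_count;		/* the field TI_PREEMPT indexes in the asm */
};

extern void dispatch_irq(void);		/* stand-in for the _do_irq call */

/* thread_info sits at the bottom of the aligned kernel stack, so masking the
 * stack pointer finds it -- the same thing the "r7 = sp; r7 = r7 & r4"
 * sequence computes with ALIGN_PAGE_MASK loaded into r4.
 */
static struct thread_info *bfin_thread_info(unsigned long sp)
{
	return (struct thread_info *)(sp & ALIGN_PAGE_MASK_GUESS);
}

void common_int_entry(unsigned long sp)
{
	struct thread_info *ti = bfin_thread_info(sp);

	ti->preempt_count++;		/* r7 = [p5 + TI_PREEMPT]; r7 += 1; */
	dispatch_irq();			/* pseudo_long_call _do_irq, p2; */
	ti->preempt_count--;		/* restore preempt count afterwards */
}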
