Diffstat (limited to 'arch/blackfin/mach-common/entry.S')
-rw-r--r--  arch/blackfin/mach-common/entry.S  611
1 file changed, 353 insertions(+), 258 deletions(-)
diff --git a/arch/blackfin/mach-common/entry.S b/arch/blackfin/mach-common/entry.S
index a063a434f7e..86b5a095c5a 100644
--- a/arch/blackfin/mach-common/entry.S
+++ b/arch/blackfin/mach-common/entry.S
@@ -1,32 +1,11 @@
/*
- * File: arch/blackfin/mach-common/entry.S
- * Based on:
- * Author: Linus Torvalds
+ * Contains the system-call and fault low-level handling routines.
+ * This also contains the timer-interrupt handler, as well as all
+ * interrupts and faults that can result in a task-switch.
*
- * Created: ?
- * Description: contains the system-call and fault low-level handling routines.
- * This also contains the timer-interrupt handler, as well as all
- * interrupts and faults that can result in a task-switch.
+ * Copyright 2005-2009 Analog Devices Inc.
*
- * Modified:
- * Copyright 2004-2006 Analog Devices Inc.
- *
- * Bugs: Enter bugs at http://blackfin.uclinux.org/
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see the file COPYING, or write
- * to the Free Software Foundation, Inc.,
- * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * Licensed under the GPL-2 or later.
*/
/* NOTE: This code handles signal-recognition, which happens every time
@@ -36,23 +15,16 @@
#include <linux/init.h>
#include <linux/linkage.h>
#include <linux/unistd.h>
-#include <linux/threads.h>
#include <asm/blackfin.h>
#include <asm/errno.h>
#include <asm/fixed_code.h>
#include <asm/thread_info.h> /* TIF_NEED_RESCHED */
#include <asm/asm-offsets.h>
#include <asm/trace.h>
+#include <asm/traps.h>
#include <asm/context.S>
-#if defined(CONFIG_BFIN_SCRATCH_REG_RETN)
-# define EX_SCRATCH_REG RETN
-#elif defined(CONFIG_BFIN_SCRATCH_REG_RETE)
-# define EX_SCRATCH_REG RETE
-#else
-# define EX_SCRATCH_REG CYCLES
-#endif
#ifdef CONFIG_EXCPT_IRQ_SYSC_L1
.section .l1.text
@@ -85,13 +57,15 @@ ENTRY(_ex_workaround_261)
if !cc jump _bfin_return_from_exception;
/* fall through */
R7 = P4;
- R6 = 0x26; /* Data CPLB Miss */
+ R6 = VEC_CPLB_M; /* Data CPLB Miss */
cc = R6 == R7;
if cc jump _ex_dcplb_miss (BP);
- R6 = 0x23; /* Data CPLB Miss */
+#ifdef CONFIG_MPU
+ R6 = VEC_CPLB_VL; /* Data CPLB Violation */
cc = R6 == R7;
if cc jump _ex_dcplb_viol (BP);
- /* Handle 0x23 Data CPLB Protection Violation
+#endif
+ /* Handle Data CPLB Protection Violation
* and Data CPLB Multiple Hits - Linux Trap Zero
*/
jump _ex_trap_c;
@@ -201,11 +175,22 @@ ENTRY(_ex_single_step)
cc = r7 == 0;
if !cc jump 1f;
#endif
-
+#ifdef CONFIG_EXACT_HWERR
+ /* Read the ILAT and check to see if the process we are
+ * single stepping caused a previous hardware error.
+ * If so, do not single step (which lowers to IRQ5, and makes
+ * us miss the error).
+ */
+ p5.l = lo(ILAT);
+ p5.h = hi(ILAT);
+ r7 = [p5];
+ cc = bittst(r7, EVT_IVHW_P);
+ if cc jump 1f;
+#endif
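/* A C-level sketch of the ILAT check added above (illustrative only; it
 * assumes the usual bfin_read_ILAT() MMR accessor, and uses the EVT_IVHW
 * mask that pairs with the EVT_IVHW_P bit position tested in the asm):
 *
 *	if (bfin_read_ILAT() & EVT_IVHW)
 *		goto out;	// skip the trace-bit clearing; report the error
 */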
/* Single stepping only a single instruction, so clear the trace
* bit here. */
r7 = syscfg;
- bitclr (r7, 0);
+ bitclr (r7, SYSCFG_SSSTEP_P);
syscfg = R7;
jump _ex_trap_c;
@@ -238,7 +223,7 @@ ENTRY(_ex_single_step)
if !cc jump _bfin_return_from_exception;
r7 = syscfg;
- bitclr (r7, 0);
+ bitclr (r7, SYSCFG_SSSTEP_P); /* Turn off single step */
syscfg = R7;
/* Fall through to _bfin_return_from_exception. */
@@ -260,16 +245,7 @@ ENTRY(_bfin_return_from_exception)
r6.l = lo(SEQSTAT_EXCAUSE);
r6.h = hi(SEQSTAT_EXCAUSE);
r7 = r7 & r6;
- r6 = 0x25;
- CC = R7 == R6;
- if CC JUMP _double_fault;
-
- /* Did we cause a HW error? */
- p5.l = lo(ILAT);
- p5.h = hi(ILAT);
- r6 = [p5];
- r7 = 0x20; /* Did I just cause anther HW error? */
- r6 = r7 & r6;
+ r6 = VEC_UNCOV;
CC = R7 == R6;
if CC JUMP _double_fault;
#endif
@@ -285,7 +261,7 @@ ENTRY(_handle_bad_cplb)
/* To get here, we just tried and failed to change a CPLB
* so, handle things in trap_c (C code), by lowering to
* IRQ5, just like we normally do. Since this is not a
- * "normal" return path, we have a do alot of stuff to
+ * "normal" return path, we have a do a lot of stuff to
* the stack to get ready so, we can fall through - we
* need to make a CPLB exception look like a normal exception
*/
@@ -297,27 +273,31 @@ ENTRY(_ex_replaceable)
nop;
ENTRY(_ex_trap_c)
+ /* The only thing that has been saved in this context is
+ * (R7:6,P5:4), ASTAT & SP - don't use anything else
+ */
+
+ GET_PDA(p5, r6);
+
/* Make sure we are not in a double fault */
p4.l = lo(IPEND);
p4.h = hi(IPEND);
r7 = [p4];
CC = BITTST (r7, 5);
if CC jump _double_fault;
+ [p5 + PDA_EXIPEND] = r7;
/* Call C code (trap_c) to handle the exception, which most
* likely involves sending a signal to the current process.
* To avoid double faults, lower our priority to IRQ5 first.
*/
- P5.h = _exception_to_level5;
- P5.l = _exception_to_level5;
+ r7.h = _exception_to_level5;
+ r7.l = _exception_to_level5;
p4.l = lo(EVT5);
p4.h = hi(EVT5);
- [p4] = p5;
+ [p4] = r7;
csync;
- GET_PDA(p5, r6);
-#ifndef CONFIG_DEBUG_DOUBLEFAULT
-
/*
* Save these registers, as they are only valid in exception context
* (where we are now - as soon as we defer to IRQ5, they can change)
@@ -337,14 +317,18 @@ ENTRY(_ex_trap_c)
r6 = retx;
[p5 + PDA_RETX] = r6;
-#endif
+
+ r6 = SEQSTAT;
+ [p5 + PDA_SEQSTAT] = r6;
+
+ /* Save the state of single stepping */
r6 = SYSCFG;
[p5 + PDA_SYSCFG] = r6;
- BITCLR(r6, 0);
+ /* Clear it while we handle the exception in IRQ5 mode */
+ BITCLR(r6, SYSCFG_SSSTEP_P);
SYSCFG = r6;
- /* Disable all interrupts, but make sure level 5 is enabled so
- * we can switch to that level. Save the old mask. */
+ /* Save the current IMASK, since we change it in order to jump to level 5 */
cli r6;
[p5 + PDA_EXIMASK] = r6;
@@ -352,9 +336,21 @@ ENTRY(_ex_trap_c)
p4.h = hi(SAFE_USER_INSTRUCTION);
retx = p4;
+ /* Disable all interrupts, but make sure level 5 is enabled so
+ * we can switch to that level.
+ */
r6 = 0x3f;
sti r6;
+ /* In case interrupts are disabled via IPEND[4] (the global interrupt
+ * disable bit), clear it (re-enabling interrupts again) by the special
+ * sequence of pushing RETI onto the stack. This way we can lower ourselves
+ * to IVG5 even if the exception was taken after the interrupt handler was
+ * called but before it got a chance to enable global interrupts itself.
+ */
+ [--sp] = reti;
+ sp += 4;
+
raise 5;
jump.s _bfin_return_from_exception;
ENDPROC(_ex_trap_c)
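/* A minimal sketch of the RETI push idiom used above: on Blackfin, pushing
 * RETI has the architectural side effect of clearing IPEND[4], the global
 * interrupt disable bit, so the "raise 5" can be serviced even if the
 * exception was taken before an interrupt handler re-enabled interrupts:
 *
 *	[--sp] = reti;	// side effect: IPEND[4] = 0 (interrupts enabled)
 *	sp += 4;	// the stored value itself is not needed
 */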
@@ -363,7 +359,7 @@ ENDPROC(_ex_trap_c)
* exception. This is an unrecoverable event, so crash.
* Note: this cannot be ENTRY() as we jump here with "if cc jump" ...
*/
-_double_fault:
+ENTRY(_double_fault)
/* Turn caches & protection off, to ensure we don't get any more
* double exceptions
*/
@@ -373,8 +369,7 @@ _double_fault:
R5 = [P4]; /* Control Register*/
BITCLR(R5,ENICPLB_P);
- SSYNC; /* SSYNC required before writing to IMEM_CONTROL. */
- .align 8;
+ CSYNC; /* Disabling of CPLBs should be preceded by a CSYNC */
[P4] = R5;
SSYNC;
@@ -382,8 +377,7 @@ _double_fault:
P4.H = HI(DMEM_CONTROL);
R5 = [P4];
BITCLR(R5,ENDCPLB_P);
- SSYNC; /* SSYNC required before writing to DMEM_CONTROL. */
- .align 8;
+ CSYNC; /* Disabling of CPLBs should be preceded by a CSYNC */
[P4] = R5;
SSYNC;
@@ -404,7 +398,7 @@ _double_fault:
r0 = sp; /* stack frame pt_regs pointer argument ==> r0 */
SP += -12;
- call _double_fault_c;
+ pseudo_long_call _double_fault_c, p5;
SP += 12;
.L_double_fault_panic:
JUMP .L_double_fault_panic
@@ -414,47 +408,55 @@ ENDPROC(_double_fault)
ENTRY(_exception_to_level5)
SAVE_ALL_SYS
- GET_PDA(p4, r7); /* Fetch current PDA */
- r6 = [p4 + PDA_RETX];
+ GET_PDA(p5, r7); /* Fetch current PDA */
+ r6 = [p5 + PDA_RETX];
[sp + PT_PC] = r6;
- r6 = [p4 + PDA_SYSCFG];
+ r6 = [p5 + PDA_SYSCFG];
[sp + PT_SYSCFG] = r6;
- /* Restore interrupt mask. We haven't pushed RETI, so this
- * doesn't enable interrupts until we return from this handler. */
- r6 = [p4 + PDA_EXIMASK];
- sti r6;
+ r6 = [p5 + PDA_SEQSTAT]; /* Read back seqstat */
+ [sp + PT_SEQSTAT] = r6;
/* Restore the hardware error vector. */
- P5.h = _evt_ivhw;
- P5.l = _evt_ivhw;
+ r7.h = _evt_ivhw;
+ r7.l = _evt_ivhw;
p4.l = lo(EVT5);
p4.h = hi(EVT5);
- [p4] = p5;
+ [p4] = r7;
csync;
- p2.l = lo(IPEND);
- p2.h = hi(IPEND);
- csync;
- r0 = [p2]; /* Read current IPEND */
- [sp + PT_IPEND] = r0; /* Store IPEND */
+#ifdef CONFIG_DEBUG_DOUBLEFAULT
+ /* Now that we have the hardware error vector programmed properly
+ * we can re-enable interrupts (IPEND[4]), so if _trap_c causes
+ * another hardware error, we can catch it (self-nesting).
+ */
+ [--sp] = reti;
+ sp += 4;
+#endif
+
+ r7 = [p5 + PDA_EXIPEND]; /* Read the IPEND from the Exception state */
+ [sp + PT_IPEND] = r7; /* Store IPEND onto the stack */
r0 = sp; /* stack frame pt_regs pointer argument ==> r0 */
SP += -12;
- call _trap_c;
+ pseudo_long_call _trap_c, p4;
SP += 12;
-#ifdef CONFIG_DEBUG_DOUBLEFAULT
- /* Grab ILAT */
- p2.l = lo(ILAT);
- p2.h = hi(ILAT);
- r0 = [p2];
- r1 = 0x20; /* Did I just cause anther HW error? */
- r0 = r0 & r1;
- CC = R0 == R1;
- if CC JUMP _double_fault;
-#endif
+ /* If interrupts were off during the exception (IPEND[4] = 1), turn them off
+ * before we return.
+ */
+ CC = BITTST(r7, EVT_IRPTEN_P);
+ if !CC jump 1f;
+ /* this will load a random value into the reti register - but that is OK,
+ * since we do restore it to the correct value in the 'RESTORE_ALL_SYS' macro
+ */
+ sp += -4;
+ reti = [sp++];
+1:
+ /* restore the interrupt mask (IMASK) */
+ r6 = [p5 + PDA_EXIMASK];
+ sti r6;
call _ret_from_exception;
RESTORE_ALL_SYS
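/* The inverse idiom (a sketch): popping into RETI sets IPEND[4] again and
 * so disables global interrupts; the garbage value loaded here is harmless
 * since RESTORE_ALL_SYS reloads the real RETI from the pt_regs frame:
 *
 *	sp += -4;	// back up over scratch space
 *	reti = [sp++];	// side effect: IPEND[4] = 1 (interrupts disabled)
 */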
@@ -468,18 +470,21 @@ ENTRY(_trap) /* Exception: 4th entry into system event table(supervisor mode)*/
*/
EX_SCRATCH_REG = sp;
GET_PDA_SAFE(sp);
- sp = [sp + PDA_EXSTACK]
+ sp = [sp + PDA_EXSTACK];
/* Try to deal with syscalls quickly. */
[--sp] = ASTAT;
[--sp] = (R7:6,P5:4);
-#if ANOMALY_05000283 || ANOMALY_05000315
- cc = r7 == r7;
- p5.h = HI(CHIPID);
- p5.l = LO(CHIPID);
- if cc jump 1f;
- r7.l = W[p5];
-1:
+ ANOMALY_283_315_WORKAROUND(p5, r7)
+
+#ifdef CONFIG_EXACT_HWERR
+ /* Make sure all pending reads/writes complete. This will ensure any
+ * accesses which could cause hardware errors complete, and signal
+ * the hardware before we do something silly, like crash the
+ * kernel. We don't need to work around anomaly 05000312, since
+ * we are already atomic.
+ */
+ ssync;
#endif
#ifdef CONFIG_DEBUG_DOUBLEFAULT
@@ -494,18 +499,18 @@ ENTRY(_trap) /* Exception: 4th entry into system event table(supervisor mode)*/
p4.l = lo(DCPLB_FAULT_ADDR);
p4.h = hi(DCPLB_FAULT_ADDR);
r7 = [p4];
- [p5 + PDA_DCPLB] = r7;
+ [p5 + PDA_DF_DCPLB] = r7;
p4.l = lo(ICPLB_FAULT_ADDR);
p4.h = hi(ICPLB_FAULT_ADDR);
r7 = [p4];
- [p5 + PDA_ICPLB] = r7;
+ [p5 + PDA_DF_ICPLB] = r7;
- r6 = retx;
- [p5 + PDA_RETX] = r6;
+ r7 = retx;
+ [p5 + PDA_DF_RETX] = r7;
r7 = SEQSTAT; /* reason code is in bit 5:0 */
- [p5 + PDA_SEQSTAT] = r7;
+ [p5 + PDA_DF_SEQSTAT] = r7;
#else
r7 = SEQSTAT; /* reason code is in bit 5:0 */
#endif
@@ -525,61 +530,6 @@ ENTRY(_trap) /* Exception: 4th entry into system event table(supervisor mode)*/
jump .Lsyscall_really_exit;
ENDPROC(_trap)
-ENTRY(_kernel_execve)
- link SIZEOF_PTREGS;
- p0 = sp;
- r3 = SIZEOF_PTREGS / 4;
- r4 = 0(x);
-.Lclear_regs:
- [p0++] = r4;
- r3 += -1;
- cc = r3 == 0;
- if !cc jump .Lclear_regs (bp);
-
- p0 = sp;
- sp += -16;
- [sp + 12] = p0;
- call _do_execve;
- SP += 16;
- cc = r0 == 0;
- if ! cc jump .Lexecve_failed;
- /* Success. Copy our temporary pt_regs to the top of the kernel
- * stack and do a normal exception return.
- */
- r1 = sp;
- r0 = (-KERNEL_STACK_SIZE) (x);
- r1 = r1 & r0;
- p2 = r1;
- p3 = [p2];
- r0 = KERNEL_STACK_SIZE - 4 (z);
- p1 = r0;
- p1 = p1 + p2;
-
- p0 = fp;
- r4 = [p0--];
- r3 = SIZEOF_PTREGS / 4;
-.Lcopy_regs:
- r4 = [p0--];
- [p1--] = r4;
- r3 += -1;
- cc = r3 == 0;
- if ! cc jump .Lcopy_regs (bp);
-
- r0 = (KERNEL_STACK_SIZE - SIZEOF_PTREGS) (z);
- p1 = r0;
- p1 = p1 + p2;
- sp = p1;
- r0 = syscfg;
- [SP + PT_SYSCFG] = r0;
- [p3 + (TASK_THREAD + THREAD_KSP)] = sp;
-
- RESTORE_CONTEXT;
- rti;
-.Lexecve_failed:
- unlink;
- rts;
-ENDPROC(_kernel_execve)
-
ENTRY(_system_call)
/* Store IPEND */
p2.l = lo(IPEND);
@@ -603,7 +553,7 @@ ENTRY(_system_call)
#ifdef CONFIG_IPIPE
r0 = sp;
SP += -12;
- call ___ipipe_syscall_root;
+ pseudo_long_call ___ipipe_syscall_root, p0;
SP += 12;
cc = r0 == 1;
if cc jump .Lsyscall_really_exit;
@@ -614,13 +564,6 @@ ENTRY(_system_call)
p0 = [sp + PT_ORIG_P0];
#endif /* CONFIG_IPIPE */
- /* Check the System Call */
- r7 = __NR_syscall;
- /* System call number is passed in P0 */
- r6 = p0;
- cc = r6 < r7;
- if ! cc jump .Lbadsys;
-
/* are we tracing syscalls?*/
r7 = sp;
r6.l = lo(ALIGN_PAGE_MASK);
@@ -630,6 +573,14 @@ ENTRY(_system_call)
r7 = [p2+TI_FLAGS];
CC = BITTST(r7,TIF_SYSCALL_TRACE);
if CC JUMP _sys_trace;
+ CC = BITTST(r7,TIF_SINGLESTEP);
+ if CC JUMP _sys_trace;
+
+ /* Make sure the system call # is valid */
+ p4 = __NR_syscall;
+ /* System call number is passed in P0 */
+ cc = p4 <= p0;
+ if cc jump .Lbadsys;
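/* C-level equivalent of the bounds check above (a sketch; the syscall
 * number arrives from userspace in P0):
 *
 *	if (nr >= __NR_syscall)
 *		return -ENOSYS;		// the .Lbadsys path
 */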
/* Execute the appropriate system call */
@@ -670,10 +621,16 @@ ENTRY(_system_call)
#ifdef CONFIG_IPIPE
cc = BITTST(r7, TIF_IRQ_SYNC);
if !cc jump .Lsyscall_no_irqsync;
+ /*
+ * Clear IPEND[4] manually to undo what resume_userspace_1 just did;
+ * we need this so that high priority domain interrupts may still
+ * preempt the current domain while the pipeline log is being played
+ * back.
+ */
[--sp] = reti;
- r0 = [sp++];
+ SP += 4; /* don't merge with next insn to keep the pattern obvious */
SP += -12;
- call ___ipipe_sync_root;
+ pseudo_long_call ___ipipe_sync_root, p4;
SP += 12;
jump .Lresume_userspace_1;
.Lsyscall_no_irqsync:
@@ -683,36 +640,32 @@ ENTRY(_system_call)
/* Reenable interrupts. */
[--sp] = reti;
- r0 = [sp++];
+ sp += 4;
SP += -12;
- call _schedule;
+ pseudo_long_call _schedule, p4;
SP += 12;
jump .Lresume_userspace_1;
.Lsyscall_sigpending:
- cc = BITTST(r7, TIF_RESTORE_SIGMASK);
- if cc jump .Lsyscall_do_signals;
cc = BITTST(r7, TIF_SIGPENDING);
+ if cc jump .Lsyscall_do_signals;
+ cc = BITTST(r7, TIF_NOTIFY_RESUME);
if !cc jump .Lsyscall_really_exit;
.Lsyscall_do_signals:
/* Reenable interrupts. */
[--sp] = reti;
- r0 = [sp++];
+ sp += 4;
r0 = sp;
SP += -12;
- call _do_signal;
+ pseudo_long_call _do_notify_resume, p5;
SP += 12;
.Lsyscall_really_exit:
r5 = [sp + PT_RESERVED];
rets = r5;
-#ifdef CONFIG_IPIPE
- [--sp] = reti;
- r5 = [sp++];
-#endif /* CONFIG_IPIPE */
rts;
ENDPROC(_system_call)
@@ -720,11 +673,17 @@ ENDPROC(_system_call)
* this symbol need not be global anyway, so ...
*/
_sys_trace:
- call _syscall_trace;
-
- /* Execute the appropriate system call */
+ r0 = sp;
+ pseudo_long_call _syscall_trace_enter, p5;
+ /* Make sure the system call # is valid */
p4 = [SP + PT_P0];
+ p3 = __NR_syscall;
+ cc = p3 <= p4;
+ r0 = -ENOSYS;
+ if cc jump .Lsys_trace_badsys;
+
+ /* Execute the appropriate system call */
p5.l = _sys_call_table;
p5.h = _sys_call_table;
p5 = p5 + (p4 << 2);
@@ -742,9 +701,11 @@ _sys_trace:
SP += -12;
call (p5);
SP += 24;
+.Lsys_trace_badsys:
[sp + PT_R0] = r0;
- call _syscall_trace;
+ r0 = sp;
+ pseudo_long_call _syscall_trace_leave, p5;
jump .Lresume_userspace;
ENDPROC(_sys_trace)
@@ -792,7 +753,7 @@ _new_old_task:
rets = [sp++];
/*
- * When we come out of resume, r0 carries "old" task, becuase we are
+ * When we come out of resume, r0 carries "old" task, because we are
* in "new" task.
*/
rts;
@@ -800,13 +761,13 @@ ENDPROC(_resume)
ENTRY(_ret_from_exception)
#ifdef CONFIG_IPIPE
- [--sp] = rets;
- SP += -12;
- call ___ipipe_check_root
- SP += 12
- rets = [sp++];
- cc = r0 == 0;
- if cc jump 4f; /* not on behalf of Linux, get out */
+ p2.l = _ipipe_percpu_domain;
+ p2.h = _ipipe_percpu_domain;
+ r0.l = _ipipe_root;
+ r0.h = _ipipe_root;
+ r2 = [p2];
+ cc = r0 == r2;
+ if !cc jump 4f; /* not on behalf of the root domain, get out */
#endif /* CONFIG_IPIPE */
p2.l = lo(IPEND);
p2.h = hi(IPEND);
@@ -855,25 +816,89 @@ ENTRY(_ret_from_exception)
p1.h = _schedule_and_signal;
[p0] = p1;
csync;
- raise 15; /* raise evt14 to do signal or reschedule */
+ raise 15; /* raise evt15 to do signal or reschedule */
4:
r0 = syscfg;
- bitclr(r0, 0);
+ bitclr(r0, SYSCFG_SSSTEP_P); /* Turn off single step */
syscfg = r0;
5:
rts;
ENDPROC(_ret_from_exception)
-#ifdef CONFIG_IPIPE
+#if defined(CONFIG_PREEMPT)
-_sync_root_irqs:
- [--sp] = reti; /* Reenable interrupts */
- r0 = [sp++];
- jump.l ___ipipe_sync_root
+ENTRY(_up_to_irq14)
+#if ANOMALY_05000281 || ANOMALY_05000461
+ r0.l = lo(SAFE_USER_INSTRUCTION);
+ r0.h = hi(SAFE_USER_INSTRUCTION);
+ reti = r0;
+#endif
+
+#ifdef CONFIG_DEBUG_HWERR
+ /* enable irq14 & hwerr interrupt, until we transition to _evt_up_evt14 */
+ r0 = (EVT_IVG14 | EVT_IVHW | EVT_IRPTEN | EVT_EVX | EVT_NMI | EVT_RST | EVT_EMU);
+#else
+ /* Only enable irq14 interrupt, until we transition to _evt_up_evt14 */
+ r0 = (EVT_IVG14 | EVT_IRPTEN | EVT_EVX | EVT_NMI | EVT_RST | EVT_EMU);
+#endif
+ sti r0;
+
+ p0.l = lo(EVT14);
+ p0.h = hi(EVT14);
+ p1.l = _evt_up_evt14;
+ p1.h = _evt_up_evt14;
+ [p0] = p1;
+ csync;
+
+ raise 14;
+1:
+ jump 1b;
+ENDPROC(_up_to_irq14)
+
+ENTRY(_evt_up_evt14)
+#ifdef CONFIG_DEBUG_HWERR
+ r0 = (EVT_IVHW | EVT_IRPTEN | EVT_EVX | EVT_NMI | EVT_RST | EVT_EMU);
+ sti r0;
+#else
+ cli r0;
+#endif
+#ifdef CONFIG_TRACE_IRQFLAGS
+ [--sp] = rets;
+ sp += -12;
+ call _trace_hardirqs_off;
+ sp += 12;
+ rets = [sp++];
+#endif
+ [--sp] = RETI;
+ SP += 4;
+
+ /* restore normal evt14 */
+ p0.l = lo(EVT14);
+ p0.h = hi(EVT14);
+ p1.l = _evt_evt14;
+ p1.h = _evt_evt14;
+ [p0] = p1;
+ csync;
+
+ rts;
+ENDPROC(_evt_up_evt14)
+
+#endif
+
+#ifdef CONFIG_IPIPE
_resume_kernel_from_int:
- r0.l = _sync_root_irqs
- r0.h = _sync_root_irqs
+ r1 = LO(~0x8000) (Z);
+ r1 = r0 & r1;
+ r0 = 1;
+ r0 = r1 - r0;
+ r2 = r1 & r0;
+ cc = r2 == 0;
+ /* Sync the root stage only from the outer interrupt level. */
+ if !cc jump .Lnosync;
+ r0.l = ___ipipe_sync_root;
+ r0.h = ___ipipe_sync_root;
+ [--sp] = reti;
[--sp] = rets;
[--sp] = ( r7:4, p5:3 );
SP += -12;
@@ -881,9 +906,57 @@ _resume_kernel_from_int:
SP += 12;
( r7:4, p5:3 ) = [sp++];
rets = [sp++];
+ reti = [sp++];
+.Lnosync:
rts
+#elif defined(CONFIG_PREEMPT)
+
+_resume_kernel_from_int:
+ /* check preempt_count */
+ r7 = sp;
+ r4.l = lo(ALIGN_PAGE_MASK);
+ r4.h = hi(ALIGN_PAGE_MASK);
+ r7 = r7 & r4;
+ p5 = r7;
+ r7 = [p5 + TI_PREEMPT];
+ cc = r7 == 0x0;
+ if !cc jump .Lreturn_to_kernel;
+.Lneed_schedule:
+ r7 = [p5 + TI_FLAGS];
+ r4.l = lo(_TIF_WORK_MASK);
+ r4.h = hi(_TIF_WORK_MASK);
+ r7 = r7 & r4;
+ cc = BITTST(r7, TIF_NEED_RESCHED);
+ if !cc jump .Lreturn_to_kernel;
+ /*
+ * let schedule() be done at level 15, otherwise the scheduled process
+ * will run at a high level and block low level interrupts
+ */
+ r6 = reti; /* save reti */
+ r5.l = .Lkernel_schedule;
+ r5.h = .Lkernel_schedule;
+ reti = r5;
+ rti;
+.Lkernel_schedule:
+ [--sp] = rets;
+ sp += -12;
+ pseudo_long_call _preempt_schedule_irq, p4;
+ sp += 12;
+ rets = [sp++];
+
+ [--sp] = rets;
+ sp += -12;
+ /* go up to irq14 so that the reti after restore_all can return to irq15 (kernel) */
+ pseudo_long_call _up_to_irq14, p4;
+ sp += 12;
+ rets = [sp++];
+
+ reti = r6; /* restore reti so that the original process can return to its interrupted point */
+
+ jump .Lneed_schedule;
#else
-#define _resume_kernel_from_int 2f
+
+#define _resume_kernel_from_int .Lreturn_to_kernel
#endif
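/* A C-level sketch of the CONFIG_PREEMPT path above (illustrative; "ti" is
 * the thread_info recovered by masking the stack pointer with
 * ALIGN_PAGE_MASK):
 *
 *	if (ti->preempt_count == 0)
 *		while (ti->flags & _TIF_NEED_RESCHED)
 *			preempt_schedule_irq();	// done at IRQ15 via the EVT14 bounce
 */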
ENTRY(_return_from_int)
@@ -893,7 +966,7 @@ ENTRY(_return_from_int)
p2.h = hi(ILAT);
r0 = [p2];
cc = bittst (r0, EVT_IVG15_P);
- if cc jump 2f;
+ if cc jump .Lreturn_to_kernel;
/* if not return to user mode, get out */
p2.l = lo(IPEND);
@@ -916,7 +989,7 @@ ENTRY(_return_from_int)
p1.h = _schedule_and_signal_from_int;
[p0] = p1;
csync;
-#if ANOMALY_05000281
+#if ANOMALY_05000281 || ANOMALY_05000461
r0.l = lo(SAFE_USER_INSTRUCTION);
r0.h = hi(SAFE_USER_INSTRUCTION);
reti = r0;
@@ -925,32 +998,49 @@ ENTRY(_return_from_int)
STI r0;
raise 15; /* raise evt15 to do signal or reschedule */
rti;
-2:
+.Lreturn_to_kernel:
rts;
ENDPROC(_return_from_int)
ENTRY(_lower_to_irq14)
-#if ANOMALY_05000281
+#if ANOMALY_05000281 || ANOMALY_05000461
r0.l = lo(SAFE_USER_INSTRUCTION);
r0.h = hi(SAFE_USER_INSTRUCTION);
reti = r0;
#endif
- r0 = 0x401f;
+
+#ifdef CONFIG_DEBUG_HWERR
+ /* enable irq14 & hwerr interrupt, until we transition to _evt_evt14 */
+ r0 = (EVT_IVG14 | EVT_IVHW | EVT_IRPTEN | EVT_EVX | EVT_NMI | EVT_RST | EVT_EMU);
+#else
+ /* Only enable irq14 interrupt, until we transition to _evt_evt14 */
+ r0 = (EVT_IVG14 | EVT_IRPTEN | EVT_EVX | EVT_NMI | EVT_RST | EVT_EMU);
+#endif
sti r0;
raise 14;
rti;
-ENTRY(_evt14_softirq)
+ENDPROC(_lower_to_irq14)
+
+ENTRY(_evt_evt14)
#ifdef CONFIG_DEBUG_HWERR
- r0 = 0x3f;
+ r0 = (EVT_IVHW | EVT_IRPTEN | EVT_EVX | EVT_NMI | EVT_RST | EVT_EMU);
sti r0;
#else
cli r0;
#endif
+#ifdef CONFIG_TRACE_IRQFLAGS
+ [--sp] = rets;
+ sp += -12;
+ call _trace_hardirqs_off;
+ sp += 12;
+ rets = [sp++];
+#endif
[--sp] = RETI;
SP += 4;
rts;
+ENDPROC(_evt_evt14)
-_schedule_and_signal_from_int:
+ENTRY(_schedule_and_signal_from_int)
/* To end up here, vector 15 was changed - so we have to change it
* back.
*/
@@ -968,6 +1058,14 @@ _schedule_and_signal_from_int:
p1 = rets;
[sp + PT_RESERVED] = p1;
+#ifdef CONFIG_TRACE_IRQFLAGS
+ /* trace_hardirqs_on() checks if all irqs are disabled. But here IRQ 15
+ * is turned on, so disable all irqs. */
+ cli r0;
+ sp += -12;
+ call _trace_hardirqs_on;
+ sp += 12;
+#endif
#ifdef CONFIG_SMP
GET_PDA(p0, r0); /* Fetch current PDA (can't migrate to other CPU here) */
r0 = [p0 + PDA_IRQFLAGS];
@@ -978,13 +1076,22 @@ _schedule_and_signal_from_int:
#endif
sti r0;
+ /* finish the userspace "atomic" functions for it */
+ r1.l = lo(FIXED_CODE_END);
+ r1.h = hi(FIXED_CODE_END);
+ r2 = [sp + PT_PC];
+ cc = r1 <= r2;
+ if cc jump .Lresume_userspace (bp);
+
r0 = sp;
sp += -12;
- call _finish_atomic_sections;
+
+ pseudo_long_call _finish_atomic_sections, p5;
sp += 12;
jump.s .Lresume_userspace;
+ENDPROC(_schedule_and_signal_from_int)
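/* A sketch of the fixed-code test above: finish_atomic_sections() is only
 * called when the interrupted PC lies inside the userspace fixed-code
 * ("atomic op") region, which ends at FIXED_CODE_END:
 *
 *	if (regs->pc < FIXED_CODE_END)
 *		finish_atomic_sections(regs);
 */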
-_schedule_and_signal:
+ENTRY(_schedule_and_signal)
SAVE_CONTEXT_SYSCALL
/* To end up here, vector 15 was changed - so we have to change it
* back.
@@ -1002,7 +1109,7 @@ _schedule_and_signal:
1:
RESTORE_CONTEXT
rti;
-ENDPROC(_lower_to_irq14)
+ENDPROC(_schedule_and_signal)
/* We handle this 100% in exception space - to reduce overhead
* Only potential problem is if the software buffer gets swapped out of the
@@ -1074,20 +1181,13 @@ ENTRY(_software_trace_buff)
.endr
#endif /* CONFIG_DEBUG_BFIN_HWTRACE_EXPAND */
-#if CONFIG_EARLY_PRINTK
+#ifdef CONFIG_EARLY_PRINTK
__INIT
ENTRY(_early_trap)
SAVE_ALL_SYS
trace_buffer_stop(p0,r0);
-#if ANOMALY_05000283 || ANOMALY_05000315
- cc = r5 == r5;
- p4.h = HI(CHIPID);
- p4.l = LO(CHIPID);
- if cc jump 1f;
- r5.l = W[p4];
-1:
-#endif
+ ANOMALY_283_315_WORKAROUND(p4, r5)
/* Turn caches off, to ensure we don't get double exceptions */
@@ -1096,9 +1196,7 @@ ENTRY(_early_trap)
R5 = [P4]; /* Control Register*/
BITCLR(R5,ENICPLB_P);
- CLI R1;
- SSYNC; /* SSYNC required before writing to IMEM_CONTROL. */
- .align 8;
+ CSYNC; /* Disabling of CPLBs should be preceded by a CSYNC */
[P4] = R5;
SSYNC;
@@ -1106,11 +1204,9 @@ ENTRY(_early_trap)
P4.H = HI(DMEM_CONTROL);
R5 = [P4];
BITCLR(R5,ENDCPLB_P);
- SSYNC; /* SSYNC required before writing to DMEM_CONTROL. */
- .align 8;
+ CSYNC; /* Disabling of CPLBs should be preceded by a CSYNC */
[P4] = R5;
SSYNC;
- STI R1;
r0 = sp; /* stack frame pt_regs pointer argument ==> r0 */
r1 = RETX;
@@ -1217,7 +1313,7 @@ END(_ex_table)
ENTRY(_sys_call_table)
.long _sys_restart_syscall /* 0 */
.long _sys_exit
- .long _sys_fork
+ .long _sys_ni_syscall /* fork */
.long _sys_read
.long _sys_write
.long _sys_open /* 5 */
@@ -1335,12 +1431,12 @@ ENTRY(_sys_call_table)
.long _sys_ni_syscall /* old sys_ipc */
.long _sys_fsync
.long _sys_ni_syscall /* old sys_sigreturn */
- .long _sys_clone /* 120 */
+ .long _bfin_clone /* 120 */
.long _sys_setdomainname
.long _sys_newuname
.long _sys_ni_syscall /* old sys_modify_ldt */
.long _sys_adjtimex
- .long _sys_ni_syscall /* 125 */ /* sys_mprotect */
+ .long _sys_mprotect /* 125 */
.long _sys_ni_syscall /* old sys_sigprocmask */
.long _sys_ni_syscall /* old "creat_module" */
.long _sys_init_module
@@ -1359,16 +1455,16 @@ ENTRY(_sys_call_table)
.long _sys_getdents
.long _sys_ni_syscall /* sys_select */
.long _sys_flock
- .long _sys_ni_syscall /* sys_msync */
+ .long _sys_msync
.long _sys_readv /* 145 */
.long _sys_writev
.long _sys_getsid
.long _sys_fdatasync
.long _sys_sysctl
- .long _sys_ni_syscall /* 150 */ /* sys_mlock */
- .long _sys_ni_syscall /* sys_munlock */
- .long _sys_ni_syscall /* sys_mlockall */
- .long _sys_ni_syscall /* sys_munlockall */
+ .long _sys_mlock /* 150 */
+ .long _sys_munlock
+ .long _sys_mlockall
+ .long _sys_munlockall
.long _sys_sched_setparam
.long _sys_sched_getparam /* 155 */
.long _sys_sched_setscheduler
@@ -1384,7 +1480,7 @@ ENTRY(_sys_call_table)
.long _sys_ni_syscall /* for vm86 */
.long _sys_ni_syscall /* old "query_module" */
.long _sys_ni_syscall /* sys_poll */
- .long _sys_nfsservctl
+ .long _sys_ni_syscall /* old nfsservctl */
.long _sys_setresgid /* setresgid16 */ /* 170 */
.long _sys_getresgid /* getresgid16 */
.long _sys_prctl
@@ -1407,7 +1503,7 @@ ENTRY(_sys_call_table)
.long _sys_ni_syscall /* streams2 */
.long _sys_vfork /* 190 */
.long _sys_getrlimit
- .long _sys_mmap2
+ .long _sys_mmap_pgoff
.long _sys_truncate64
.long _sys_ftruncate64
.long _sys_stat64 /* 195 */
@@ -1433,8 +1529,8 @@ ENTRY(_sys_call_table)
.long _sys_setfsuid /* 215 */
.long _sys_setfsgid
.long _sys_pivot_root
- .long _sys_ni_syscall /* sys_mincore */
- .long _sys_ni_syscall /* sys_madvise */
+ .long _sys_mincore
+ .long _sys_madvise
.long _sys_getdents64 /* 220 */
.long _sys_fcntl64
.long _sys_ni_syscall /* reserved for TUX */
@@ -1490,7 +1586,7 @@ ENTRY(_sys_call_table)
.long _sys_utimes
.long _sys_fadvise64_64
.long _sys_ni_syscall /* vserver */
- .long _sys_ni_syscall /* 275, mbind */
+ .long _sys_mbind /* 275 */
.long _sys_ni_syscall /* get_mempolicy */
.long _sys_ni_syscall /* set_mempolicy */
.long _sys_mq_open
@@ -1583,24 +1679,23 @@ ENTRY(_sys_call_table)
.long _sys_inotify_init1 /* 365 */
.long _sys_preadv
.long _sys_pwritev
+ .long _sys_rt_tgsigqueueinfo
+ .long _sys_perf_event_open
+ .long _sys_recvmmsg /* 370 */
+ .long _sys_fanotify_init
+ .long _sys_fanotify_mark
+ .long _sys_prlimit64
+ .long _sys_cacheflush
+ .long _sys_name_to_handle_at /* 375 */
+ .long _sys_open_by_handle_at
+ .long _sys_clock_adjtime
+ .long _sys_syncfs
+ .long _sys_setns
+ .long _sys_sendmmsg /* 380 */
+ .long _sys_process_vm_readv
+ .long _sys_process_vm_writev
.rept NR_syscalls-(.-_sys_call_table)/4
.long _sys_ni_syscall
.endr
END(_sys_call_table)
-
-#ifdef CONFIG_EXCEPTION_L1_SCRATCH
-/* .section .l1.bss.scratch */
-.set _exception_stack_top, L1_SCRATCH_START + L1_SCRATCH_LENGTH
-#else
-#ifdef CONFIG_SYSCALL_TAB_L1
-.section .l1.bss
-#else
-.bss
-#endif
-ENTRY(_exception_stack)
- .rept 1024 * NR_CPUS
- .long 0
- .endr
-_exception_stack_top:
-#endif