Diffstat (limited to 'arch/xtensa/kernel')
 -rw-r--r--  arch/xtensa/kernel/.gitignore          1
 -rw-r--r--  arch/xtensa/kernel/Makefile           10
 -rw-r--r--  arch/xtensa/kernel/align.S             5
 -rw-r--r--  arch/xtensa/kernel/asm-offsets.c       1
 -rw-r--r--  arch/xtensa/kernel/coprocessor.S       9
 -rw-r--r--  arch/xtensa/kernel/entry.S           868
 -rw-r--r--  arch/xtensa/kernel/head.S            220
 -rw-r--r--  arch/xtensa/kernel/irq.c             191
 -rw-r--r--  arch/xtensa/kernel/mcount.S           50
 -rw-r--r--  arch/xtensa/kernel/mxhead.S           85
 -rw-r--r--  arch/xtensa/kernel/pci.c              10
 -rw-r--r--  arch/xtensa/kernel/platform.c          4
 -rw-r--r--  arch/xtensa/kernel/process.c          19
 -rw-r--r--  arch/xtensa/kernel/ptrace.c           35
 -rw-r--r--  arch/xtensa/kernel/setup.c           280
 -rw-r--r--  arch/xtensa/kernel/signal.c           26
 -rw-r--r--  arch/xtensa/kernel/smp.c             607
 -rw-r--r--  arch/xtensa/kernel/stacktrace.c      120
 -rw-r--r--  arch/xtensa/kernel/syscall.c          41
 -rw-r--r--  arch/xtensa/kernel/time.c            147
 -rw-r--r--  arch/xtensa/kernel/traps.c           165
 -rw-r--r--  arch/xtensa/kernel/vectors.S         425
 -rw-r--r--  arch/xtensa/kernel/vmlinux.lds.S     134
 -rw-r--r--  arch/xtensa/kernel/xtensa_ksyms.c     15

24 files changed, 2448 insertions(+), 1020 deletions(-)
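One change worth pulling out of the patch body below: irq.c moves to the generic irqchip infrastructure while keeping the one- or two-cell device-tree interrupt binding, where the first cell is the IRQ number and an optional second cell set to 1 marks that number as an external IRQ that must first be mapped to a core interrupt. A minimal standalone sketch of that translation rule; NUM_INTERRUPTS and the map_ext_irq() body here are hypothetical stand-ins for the core-specific XCHAL configuration:

#include <stdio.h>

#define NUM_INTERRUPTS 32            /* stand-in for XCHAL_NUM_INTERRUPTS */

/* Stand-in for xtensa_map_ext_irq(): pretend external IRQ n is wired to
 * core interrupt n + 4. The real mapping walks the external edge/level
 * interrupt masks, as the irq.c hunk below shows. */
static unsigned map_ext_irq(unsigned ext_irq)
{
        return ext_irq + 4;
}

/* One- or two-cell DT binding: cell 0 is the IRQ number; an optional
 * cell 1 says whether that number is external (1) or internal (0). */
static int xlate(const unsigned *spec, unsigned size, unsigned *hwirq)
{
        if (size < 1 || size > 2)
                return -1;
        if (size == 2 && spec[1] == 1) {
                unsigned mapped = map_ext_irq(spec[0]);

                if (mapped >= NUM_INTERRUPTS)
                        return -1;
                *hwirq = mapped;
        } else {
                *hwirq = spec[0];
        }
        return 0;
}

int main(void)
{
        unsigned internal[] = { 6 }, external[] = { 3, 1 }, hw;

        if (!xlate(internal, 1, &hw))
                printf("<6>   -> hwirq %u\n", hw);   /* internal: passed through */
        if (!xlate(external, 2, &hw))
                printf("<3 1> -> hwirq %u\n", hw);   /* external: remapped */
        return 0;
}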
diff --git a/arch/xtensa/kernel/.gitignore b/arch/xtensa/kernel/.gitignore new file mode 100644 index 00000000000..c5f676c3c22 --- /dev/null +++ b/arch/xtensa/kernel/.gitignore @@ -0,0 +1 @@ +vmlinux.lds diff --git a/arch/xtensa/kernel/Makefile b/arch/xtensa/kernel/Makefile index c3a59d992ac..18d962a8c0c 100644 --- a/arch/xtensa/kernel/Makefile +++ b/arch/xtensa/kernel/Makefile @@ -4,13 +4,17 @@ extra-y := head.o vmlinux.lds -obj-y := align.o entry.o irq.o coprocessor.o process.o ptrace.o \ - setup.o signal.o syscall.o time.o traps.o vectors.o platform.o \ - pci-dma.o +obj-y := align.o coprocessor.o entry.o irq.o pci-dma.o platform.o process.o \ + ptrace.o setup.o signal.o stacktrace.o syscall.o time.o traps.o \ + vectors.o obj-$(CONFIG_KGDB) += xtensa-stub.o obj-$(CONFIG_PCI) += pci.o obj-$(CONFIG_MODULES) += xtensa_ksyms.o module.o +obj-$(CONFIG_FUNCTION_TRACER) += mcount.o +obj-$(CONFIG_SMP) += smp.o mxhead.o + +AFLAGS_head.o += -mtext-section-literals # In the Xtensa architecture, assembly generates literals which must always # precede the L32R instruction with a relative offset less than 256 kB. diff --git a/arch/xtensa/kernel/align.S b/arch/xtensa/kernel/align.S index aa2e87b8566..d4cef6039a5 100644 --- a/arch/xtensa/kernel/align.S +++ b/arch/xtensa/kernel/align.S @@ -146,9 +146,9 @@ * a0: trashed, original value saved on stack (PT_AREG0) * a1: a1 * a2: new stack pointer, original in DEPC - * a3: dispatch table + * a3: a3 * depc: a2, original value saved on stack (PT_DEPC) - * excsave_1: a3 + * excsave_1: dispatch table * * PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC * < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception @@ -171,7 +171,6 @@ ENTRY(fast_unaligned) s32i a8, a2, PT_AREG8 rsr a0, depc - xsr a3, excsave1 s32i a0, a2, PT_AREG2 s32i a3, a2, PT_AREG3 diff --git a/arch/xtensa/kernel/asm-offsets.c b/arch/xtensa/kernel/asm-offsets.c index 0701fad170d..1915c7c889b 100644 --- a/arch/xtensa/kernel/asm-offsets.c +++ b/arch/xtensa/kernel/asm-offsets.c @@ -42,6 +42,7 @@ int main(void) DEFINE(PT_ICOUNTLEVEL, offsetof (struct pt_regs, icountlevel)); DEFINE(PT_SYSCALL, offsetof (struct pt_regs, syscall)); DEFINE(PT_SCOMPARE1, offsetof(struct pt_regs, scompare1)); + DEFINE(PT_THREADPTR, offsetof(struct pt_regs, threadptr)); DEFINE(PT_AREG, offsetof (struct pt_regs, areg[0])); DEFINE(PT_AREG0, offsetof (struct pt_regs, areg[0])); DEFINE(PT_AREG1, offsetof (struct pt_regs, areg[1])); diff --git a/arch/xtensa/kernel/coprocessor.S b/arch/xtensa/kernel/coprocessor.S index 64765748486..a482df5df2b 100644 --- a/arch/xtensa/kernel/coprocessor.S +++ b/arch/xtensa/kernel/coprocessor.S @@ -32,9 +32,9 @@ * a0: trashed, original value saved on stack (PT_AREG0) * a1: a1 * a2: new stack pointer, original in DEPC - * a3: dispatch table + * a3: a3 * depc: a2, original value saved on stack (PT_DEPC) - * excsave_1: a3 + * excsave_1: dispatch table * * PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC * < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception @@ -225,9 +225,9 @@ ENDPROC(coprocessor_restore) * a0: trashed, original value saved on stack (PT_AREG0) * a1: a1 * a2: new stack pointer, original in DEPC - * a3: dispatch table + * a3: a3 * depc: a2, original value saved on stack (PT_DEPC) - * excsave_1: a3 + * excsave_1: dispatch table * * PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC * < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception @@ -245,7 +245,6 @@ ENTRY(fast_coprocessor) /* Save remaining registers a1-a3 and SAR */ - xsr a3, excsave1 s32i 
a3, a2, PT_AREG3 rsr a3, sar s32i a1, a2, PT_AREG1 diff --git a/arch/xtensa/kernel/entry.S b/arch/xtensa/kernel/entry.S index 3777fec85e7..ef7f4990722 100644 --- a/arch/xtensa/kernel/entry.S +++ b/arch/xtensa/kernel/entry.S @@ -7,7 +7,7 @@ * License. See the file "COPYING" in the main directory of this archive * for more details. * - * Copyright (C) 2004-2007 by Tensilica Inc. + * Copyright (C) 2004 - 2008 by Tensilica Inc. * * Chris Zankel <chris@zankel.net> * @@ -31,8 +31,6 @@ /* Unimplemented features. */ #undef KERNEL_STACK_OVERFLOW_CHECK -#undef PREEMPTIBLE_KERNEL -#undef ALLOCA_EXCEPTION_IN_IRAM /* Not well tested. * @@ -92,9 +90,9 @@ * a0: trashed, original value saved on stack (PT_AREG0) * a1: a1 * a2: new stack pointer, original value in depc - * a3: dispatch table + * a3: a3 * depc: a2, original value saved on stack (PT_DEPC) - * excsave1: a3 + * excsave1: dispatch table * * PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC * < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception @@ -110,9 +108,8 @@ ENTRY(user_exception) - /* Save a2, a3, and depc, restore excsave_1 and set SP. */ + /* Save a1, a2, a3, and set SP. */ - xsr a3, excsave1 rsr a0, depc s32i a1, a2, PT_AREG1 s32i a0, a2, PT_AREG2 @@ -130,6 +127,11 @@ _user_exception: s32i a3, a1, PT_SAR s32i a2, a1, PT_ICOUNTLEVEL +#if XCHAL_HAVE_THREADPTR + rur a2, threadptr + s32i a2, a1, PT_THREADPTR +#endif + /* Rotate ws so that the current windowbase is at bit0. */ /* Assume ws = xxwww1yyyy. Rotate ws right, so that a2 = yyyyxxwww1 */ @@ -233,9 +235,9 @@ ENDPROC(user_exception) * a0: trashed, original value saved on stack (PT_AREG0) * a1: a1 * a2: new stack pointer, original in DEPC - * a3: dispatch table + * a3: a3 * depc: a2, original value saved on stack (PT_DEPC) - * excsave_1: a3 + * excsave_1: dispatch table * * PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC * < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception @@ -251,9 +253,8 @@ ENDPROC(user_exception) ENTRY(kernel_exception) - /* Save a0, a2, a3, DEPC and set SP. */ + /* Save a1, a2, a3, and set SP. */ - xsr a3, excsave1 # restore a3, excsave_1 rsr a0, depc # get a2 s32i a1, a2, PT_AREG1 s32i a0, a2, PT_AREG2 @@ -349,15 +350,16 @@ common_exception: * so we can allow exceptions and interrupts (*) again. * Set PS(EXCM = 0, UM = 0, RING = 0, OWB = 0, WOE = 1, INTLEVEL = X) * - * (*) We only allow interrupts if PS.INTLEVEL was not set to 1 before - * (interrupts disabled) and if this exception is not an interrupt. + * (*) We only allow interrupts if they were previously enabled and + * we're not handling an IRQ. */ rsr a3, ps - addi a0, a0, -4 - movi a2, 1 - extui a3, a3, 0, 1 # a3 = PS.INTLEVEL[0] - moveqz a3, a2, a0 # a3 = 1 iff interrupt exception + addi a0, a0, -EXCCAUSE_LEVEL1_INTERRUPT + movi a2, LOCKLEVEL + extui a3, a3, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH + # a3 = PS.INTLEVEL + moveqz a3, a2, a0 # a3 = LOCKLEVEL iff interrupt movi a2, 1 << PS_WOE_BIT or a3, a3, a2 rsr a0, exccause @@ -383,11 +385,27 @@ common_exception: save_xtregs_opt a1 a2 a4 a5 a6 a7 PT_XTREGS_OPT +#ifdef CONFIG_TRACE_IRQFLAGS + l32i a4, a1, PT_DEPC + /* Double exception means we came here with an exception + * while PS.EXCM was set, i.e. interrupts disabled. + */ + bgeui a4, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f + l32i a4, a1, PT_EXCCAUSE + bnei a4, EXCCAUSE_LEVEL1_INTERRUPT, 1f + /* Coming here with an interrupt means interrupts were enabled + * and we've just disabled them. 
+ */ + movi a4, trace_hardirqs_off + callx4 a4 +1: +#endif + /* Go to second-level dispatcher. Set up parameters to pass to the * exception handler and call the exception handler. */ - movi a4, exc_table + rsr a4, excsave1 mov a6, a1 # pass stack frame mov a7, a0 # pass EXCCAUSE addx4 a4, a0, a4 @@ -398,13 +416,18 @@ common_exception: callx4 a4 /* Jump here for exception exit */ - + .global common_exception_return common_exception_return: +1: + rsil a2, LOCKLEVEL + /* Jump if we are returning from kernel exceptions. */ -1: l32i a3, a1, PT_PS - _bbci.l a3, PS_UM_BIT, 4f + l32i a3, a1, PT_PS + GET_THREAD_INFO(a2, a1) + l32i a4, a2, TI_FLAGS + _bbci.l a3, PS_UM_BIT, 6f /* Specific to a user exception exit: * We need to check some flags for signal handling and rescheduling, * @@ -413,18 +436,16 @@ common_exception_return: * Note that we don't disable interrupts here. */ - GET_THREAD_INFO(a2,a1) - l32i a4, a2, TI_FLAGS - _bbsi.l a4, TIF_NEED_RESCHED, 3f _bbsi.l a4, TIF_NOTIFY_RESUME, 2f - _bbci.l a4, TIF_SIGPENDING, 4f + _bbci.l a4, TIF_SIGPENDING, 5f 2: l32i a4, a1, PT_DEPC bgeui a4, VALID_DOUBLE_EXCEPTION_ADDRESS, 4f /* Call do_signal() */ + rsil a2, 0 movi a4, do_notify_resume # int do_notify_resume(struct pt_regs*) mov a6, a1 callx4 a4 @@ -432,11 +453,49 @@ common_exception_return: 3: /* Reschedule */ + rsil a2, 0 movi a4, schedule # void schedule (void) callx4 a4 j 1b -4: /* Restore optional registers. */ +#ifdef CONFIG_PREEMPT +6: + _bbci.l a4, TIF_NEED_RESCHED, 4f + + /* Check current_thread_info->preempt_count */ + + l32i a4, a2, TI_PRE_COUNT + bnez a4, 4f + movi a4, preempt_schedule_irq + callx4 a4 + j 1b +#endif + +5: +#ifdef CONFIG_DEBUG_TLB_SANITY + l32i a4, a1, PT_DEPC + bgeui a4, VALID_DOUBLE_EXCEPTION_ADDRESS, 4f + movi a4, check_tlb_sanity + callx4 a4 +#endif +6: +4: +#ifdef CONFIG_TRACE_IRQFLAGS + l32i a4, a1, PT_DEPC + /* Double exception means we came here with an exception + * while PS.EXCM was set, i.e. interrupts disabled. + */ + bgeui a4, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f + l32i a4, a1, PT_EXCCAUSE + bnei a4, EXCCAUSE_LEVEL1_INTERRUPT, 1f + /* Coming here with an interrupt means interrupts were enabled + * and we'll reenable them on return. + */ + movi a4, trace_hardirqs_on + callx4 a4 +1: +#endif + /* Restore optional registers. */ load_xtregs_opt a1 a2 a4 a5 a6 a7 PT_XTREGS_OPT @@ -509,6 +568,11 @@ user_exception_exit: * (if we have restored WSBITS-1 frames). */ +#if XCHAL_HAVE_THREADPTR + l32i a3, a1, PT_THREADPTR + wur a3, threadptr +#endif + 2: j common_exception_exit /* This is the kernel exception exit. * @@ -518,29 +582,6 @@ user_exception_exit: kernel_exception_exit: -#ifdef PREEMPTIBLE_KERNEL - -#ifdef CONFIG_PREEMPT - - /* - * Note: We've just returned from a call4, so we have - * at least 4 addt'l regs. - */ - - /* Check current_thread_info->preempt_count */ - - GET_THREAD_INFO(a2) - l32i a3, a2, TI_PREEMPT - bnez a3, 1f - - l32i a2, a2, TI_FLAGS - -1: - -#endif - -#endif - - /* Check if we have to do a movsp. 
* * We only have to do a movsp if the previous window-frame has @@ -650,7 +691,7 @@ common_exception_exit: l32i a1, a1, PT_AREG1 rfe -1: wsr a0, depc +1: wsr a0, depc l32i a0, a1, PT_AREG0 l32i a1, a1, PT_AREG1 rfde @@ -753,7 +794,7 @@ ENTRY(unrecoverable_exception) wsr a1, windowbase rsync - movi a1, (1 << PS_WOE_BIT) | 1 + movi a1, (1 << PS_WOE_BIT) | LOCKLEVEL wsr a1, ps rsync @@ -777,176 +818,63 @@ ENDPROC(unrecoverable_exception) * * The ALLOCA handler is entered when user code executes the MOVSP * instruction and the caller's frame is not in the register file. - * In this case, the caller frame's a0..a3 are on the stack just - * below sp (a1), and this handler moves them. * - * For "MOVSP <ar>,<as>" without destination register a1, this routine - * simply moves the value from <as> to <ar> without moving the save area. + * This algorithm was taken from Ross Morley's RTOS Porting Layer: + * + * /home/ross/rtos/porting/XtensaRTOS-PortingLayer-20090507/xtensa_vectors.S + * + * It leverages the existing window spill/fill routines and their support for + * double exceptions. The 'movsp' instruction will only cause an exception if + * the next window needs to be loaded. In fact this ALLOCA exception may be + * replaced at some point by changing the hardware to do an underflow exception + * of the proper size instead. + * + * This algorithm simply backs out the register changes started by the user + * exception handler, makes it appear that we have started a window underflow + * by rotating the window back and then setting the old window base (OWB) in + * the 'ps' register with the rolled back window base. The 'movsp' instruction + * will be re-executed and this time, since the next window frame is in the + * active AR registers, it won't cause an exception. + * + * If the WindowUnderflow code gets a TLB miss, the page will get mapped + * and the partial WindowUnderflow will be handled in the double exception + * handler. * * Entry condition: * * a0: trashed, original value saved on stack (PT_AREG0) * a1: a1 * a2: new stack pointer, original in DEPC - * a3: dispatch table + * a3: a3 * depc: a2, original value saved on stack (PT_DEPC) - * excsave_1: a3 + * excsave_1: dispatch table * * PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC * < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception */ -#if XCHAL_HAVE_BE -#define _EXTUI_MOVSP_SRC(ar) extui ar, ar, 4, 4 -#define _EXTUI_MOVSP_DST(ar) extui ar, ar, 0, 4 -#else -#define _EXTUI_MOVSP_SRC(ar) extui ar, ar, 0, 4 -#define _EXTUI_MOVSP_DST(ar) extui ar, ar, 4, 4 -#endif - ENTRY(fast_alloca) + rsr a0, windowbase + rotw -1 + rsr a2, ps + extui a3, a2, PS_OWB_SHIFT, PS_OWB_WIDTH + xor a3, a3, a4 + l32i a4, a6, PT_AREG0 + l32i a1, a6, PT_DEPC + rsr a6, depc + wsr a1, depc + slli a3, a3, PS_OWB_SHIFT + xor a2, a2, a3 + wsr a2, ps + rsync - /* We shouldn't be in a double exception. */ - - l32i a0, a2, PT_DEPC - _bgeui a0, VALID_DOUBLE_EXCEPTION_ADDRESS, .Lunhandled_double - - rsr a0, depc # get a2 - s32i a4, a2, PT_AREG4 # save a4 and - s32i a0, a2, PT_AREG2 # a2 to stack - - /* Exit critical section. */ - - movi a0, 0 - s32i a0, a3, EXC_TABLE_FIXUP - - /* Restore a3, excsave_1 */ - - xsr a3, excsave1 # make sure excsave_1 is valid for dbl. - rsr a4, epc1 # get exception address - s32i a3, a2, PT_AREG3 # save a3 to stack - -#ifdef ALLOCA_EXCEPTION_IN_IRAM -#error iram not supported -#else - /* Note: l8ui not allowed in IRAM/IROM!! 
*/ - l8ui a0, a4, 1 # read as(src) from MOVSP instruction -#endif - movi a3, .Lmovsp_src - _EXTUI_MOVSP_SRC(a0) # extract source register number - addx8 a3, a0, a3 - jx a3 - -.Lunhandled_double: - wsr a0, excsave1 - movi a0, unrecoverable_exception - callx0 a0 - - .align 8 -.Lmovsp_src: - l32i a3, a2, PT_AREG0; _j 1f; .align 8 - mov a3, a1; _j 1f; .align 8 - l32i a3, a2, PT_AREG2; _j 1f; .align 8 - l32i a3, a2, PT_AREG3; _j 1f; .align 8 - l32i a3, a2, PT_AREG4; _j 1f; .align 8 - mov a3, a5; _j 1f; .align 8 - mov a3, a6; _j 1f; .align 8 - mov a3, a7; _j 1f; .align 8 - mov a3, a8; _j 1f; .align 8 - mov a3, a9; _j 1f; .align 8 - mov a3, a10; _j 1f; .align 8 - mov a3, a11; _j 1f; .align 8 - mov a3, a12; _j 1f; .align 8 - mov a3, a13; _j 1f; .align 8 - mov a3, a14; _j 1f; .align 8 - mov a3, a15; _j 1f; .align 8 - -1: - -#ifdef ALLOCA_EXCEPTION_IN_IRAM -#error iram not supported -#else - l8ui a0, a4, 0 # read ar(dst) from MOVSP instruction -#endif - addi a4, a4, 3 # step over movsp - _EXTUI_MOVSP_DST(a0) # extract destination register - wsr a4, epc1 # save new epc_1 - - _bnei a0, 1, 1f # no 'movsp a1, ax': jump - - /* Move the save area. This implies the use of the L32E - * and S32E instructions, because this move must be done with - * the user's PS.RING privilege levels, not with ring 0 - * (kernel's) privileges currently active with PS.EXCM - * set. Note that we have stil registered a fixup routine with the - * double exception vector in case a double exception occurs. - */ - - /* a0,a4:avail a1:old user stack a2:exc. stack a3:new user stack. */ - - l32e a0, a1, -16 - l32e a4, a1, -12 - s32e a0, a3, -16 - s32e a4, a3, -12 - l32e a0, a1, -8 - l32e a4, a1, -4 - s32e a0, a3, -8 - s32e a4, a3, -4 - - /* Restore stack-pointer and all the other saved registers. */ - - mov a1, a3 - - l32i a4, a2, PT_AREG4 - l32i a3, a2, PT_AREG3 - l32i a0, a2, PT_AREG0 - l32i a2, a2, PT_AREG2 - rfe - - /* MOVSP <at>,<as> was invoked with <at> != a1. - * Because the stack pointer is not being modified, - * we should be able to just modify the pointer - * without moving any save area. - * The processor only traps these occurrences if the - * caller window isn't live, so unfortunately we can't - * use this as an alternate trap mechanism. - * So we just do the move. This requires that we - * resolve the destination register, not just the source, - * so there's some extra work. - * (PERHAPS NOT REALLY NEEDED, BUT CLEANER...) - */ - - /* a0 dst-reg, a1 user-stack, a2 stack, a3 value of src reg. 
*/ - -1: movi a4, .Lmovsp_dst - addx8 a4, a0, a4 - jx a4 - - .align 8 -.Lmovsp_dst: - s32i a3, a2, PT_AREG0; _j 1f; .align 8 - mov a1, a3; _j 1f; .align 8 - s32i a3, a2, PT_AREG2; _j 1f; .align 8 - s32i a3, a2, PT_AREG3; _j 1f; .align 8 - s32i a3, a2, PT_AREG4; _j 1f; .align 8 - mov a5, a3; _j 1f; .align 8 - mov a6, a3; _j 1f; .align 8 - mov a7, a3; _j 1f; .align 8 - mov a8, a3; _j 1f; .align 8 - mov a9, a3; _j 1f; .align 8 - mov a10, a3; _j 1f; .align 8 - mov a11, a3; _j 1f; .align 8 - mov a12, a3; _j 1f; .align 8 - mov a13, a3; _j 1f; .align 8 - mov a14, a3; _j 1f; .align 8 - mov a15, a3; _j 1f; .align 8 - -1: l32i a4, a2, PT_AREG4 - l32i a3, a2, PT_AREG3 - l32i a0, a2, PT_AREG0 - l32i a2, a2, PT_AREG2 - rfe - + _bbci.l a4, 31, 4f + rotw -1 + _bbci.l a8, 30, 8f + rotw -1 + j _WindowUnderflow12 +8: j _WindowUnderflow8 +4: j _WindowUnderflow4 ENDPROC(fast_alloca) /* @@ -963,9 +891,9 @@ ENDPROC(fast_alloca) * a0: trashed, original value saved on stack (PT_AREG0) * a1: a1 * a2: new stack pointer, original in DEPC - * a3: dispatch table + * a3: a3 * depc: a2, original value saved on stack (PT_DEPC) - * excsave_1: a3 + * excsave_1: dispatch table */ ENTRY(fast_syscall_kernel) @@ -1012,7 +940,6 @@ ENTRY(fast_syscall_unrecoverable) l32i a0, a2, PT_AREG0 # restore a0 xsr a2, depc # restore a2, depc - rsr a3, excsave1 wsr a0, excsave1 movi a0, unrecoverable_exception @@ -1034,10 +961,10 @@ ENDPROC(fast_syscall_unrecoverable) * a0: a2 (syscall-nr), original value saved on stack (PT_AREG0) * a1: a1 * a2: new stack pointer, original in a0 and DEPC - * a3: dispatch table, original in excsave_1 + * a3: a3 * a4..a15: unchanged * depc: a2, original value saved on stack (PT_DEPC) - * excsave_1: a3 + * excsave_1: dispatch table * * PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC * < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception @@ -1070,8 +997,6 @@ ENDPROC(fast_syscall_unrecoverable) ENTRY(fast_syscall_xtensa) - xsr a3, excsave1 # restore a3, excsave1 - s32i a7, a2, PT_AREG7 # we need an additional register movi a7, 4 # sizeof(unsigned int) access_ok a3, a7, a0, a2, .Leac # a0: scratch reg, a2: sp @@ -1134,9 +1059,9 @@ ENDPROC(fast_syscall_xtensa) * a0: trashed, original value saved on stack (PT_AREG0) * a1: a1 * a2: new stack pointer, original in DEPC - * a3: dispatch table + * a3: a3 * depc: a2, original value saved on stack (PT_DEPC) - * excsave_1: a3 + * excsave_1: dispatch table * * Note: We assume the stack pointer is EXC_TABLE_KSTK in the fixup handler. */ @@ -1145,199 +1070,64 @@ ENTRY(fast_syscall_spill_registers) /* Register a FIXUP handler (pass current wb as a parameter) */ + xsr a3, excsave1 movi a0, fast_syscall_spill_registers_fixup s32i a0, a3, EXC_TABLE_FIXUP rsr a0, windowbase s32i a0, a3, EXC_TABLE_PARAM + xsr a3, excsave1 # restore a3 and excsave_1 - /* Save a3 and SAR on stack. */ + /* Save a3, a4 and SAR on stack. */ rsr a0, sar - xsr a3, excsave1 # restore a3 and excsave_1 s32i a3, a2, PT_AREG3 - s32i a4, a2, PT_AREG4 - s32i a0, a2, PT_AREG5 # store SAR to PT_AREG5 + s32i a0, a2, PT_SAR - /* The spill routine might clobber a7, a11, and a15. */ + /* The spill routine might clobber a4, a7, a8, a11, a12, and a15. */ + s32i a4, a2, PT_AREG4 s32i a7, a2, PT_AREG7 + s32i a8, a2, PT_AREG8 s32i a11, a2, PT_AREG11 + s32i a12, a2, PT_AREG12 s32i a15, a2, PT_AREG15 - call0 _spill_registers # destroys a3, a4, and SAR - - /* Advance PC, restore registers and SAR, and return from exception. 
*/ - - l32i a3, a2, PT_AREG5 - l32i a4, a2, PT_AREG4 - l32i a0, a2, PT_AREG0 - wsr a3, sar - l32i a3, a2, PT_AREG3 - - /* Restore clobbered registers. */ - - l32i a7, a2, PT_AREG7 - l32i a11, a2, PT_AREG11 - l32i a15, a2, PT_AREG15 - - movi a2, 0 - rfe - -ENDPROC(fast_syscall_spill_registers) - -/* Fixup handler. - * - * We get here if the spill routine causes an exception, e.g. tlb miss. - * We basically restore WINDOWBASE and WINDOWSTART to the condition when - * we entered the spill routine and jump to the user exception handler. - * - * a0: value of depc, original value in depc - * a2: trashed, original value in EXC_TABLE_DOUBLE_SAVE - * a3: exctable, original value in excsave1 - */ - -fast_syscall_spill_registers_fixup: - - rsr a2, windowbase # get current windowbase (a2 is saved) - xsr a0, depc # restore depc and a0 - ssl a2 # set shift (32 - WB) - - /* We need to make sure the current registers (a0-a3) are preserved. - * To do this, we simply set the bit for the current window frame - * in WS, so that the exception handlers save them to the task stack. - */ - - rsr a3, excsave1 # get spill-mask - slli a2, a3, 1 # shift left by one - - slli a3, a2, 32-WSBITS - src a2, a2, a3 # a1 = xxwww1yyxxxwww1yy...... - wsr a2, windowstart # set corrected windowstart - - movi a3, exc_table - l32i a2, a3, EXC_TABLE_DOUBLE_SAVE # restore a2 - l32i a3, a3, EXC_TABLE_PARAM # original WB (in user task) - - /* Return to the original (user task) WINDOWBASE. - * We leave the following frame behind: - * a0, a1, a2 same - * a3: trashed (saved in excsave_1) - * depc: depc (we have to return to that address) - * excsave_1: a3 - */ - - wsr a3, windowbase - rsync - - /* We are now in the original frame when we entered _spill_registers: - * a0: return address - * a1: used, stack pointer - * a2: kernel stack pointer - * a3: available, saved in EXCSAVE_1 - * depc: exception address - * excsave: a3 - * Note: This frame might be the same as above. - */ - - /* Setup stack pointer. */ - - addi a2, a2, -PT_USER_SIZE - s32i a0, a2, PT_AREG0 - - /* Make sure we return to this fixup handler. */ - - movi a3, fast_syscall_spill_registers_fixup_return - s32i a3, a2, PT_DEPC # setup depc - - /* Jump to the exception handler. */ - - movi a3, exc_table - rsr a0, exccause - addx4 a0, a0, a3 # find entry in table - l32i a0, a0, EXC_TABLE_FAST_USER # load handler - jx a0 - -fast_syscall_spill_registers_fixup_return: - - /* When we return here, all registers have been restored (a2: DEPC) */ - - wsr a2, depc # exception address - - /* Restore fixup handler. */ - - xsr a3, excsave1 - movi a2, fast_syscall_spill_registers_fixup - s32i a2, a3, EXC_TABLE_FIXUP - rsr a2, windowbase - s32i a2, a3, EXC_TABLE_PARAM - l32i a2, a3, EXC_TABLE_KSTK - - /* Load WB at the time the exception occurred. */ - - rsr a3, sar # WB is still in SAR - neg a3, a3 - wsr a3, windowbase - rsync - - /* Restore a3 and return. */ - - movi a3, exc_table - xsr a3, excsave1 - - rfde - - -/* - * spill all registers. - * - * This is not a real function. The following conditions must be met: - * - * - must be called with call0. - * - uses a3, a4 and SAR. - * - the last 'valid' register of each frame are clobbered. - * - the caller must have registered a fixup handler - * (or be inside a critical section) - * - PS_EXCM must be set (PS_WOE cleared?) - */ - -ENTRY(_spill_registers) - /* * Rotate ws so that the current windowbase is at bit 0. * Assume ws = xxxwww1yy (www1 current window frame). * Rotate ws right so that a4 = yyxxxwww1. 
*/ - rsr a4, windowbase + rsr a0, windowbase rsr a3, windowstart # a3 = xxxwww1yy - ssr a4 # holds WB - slli a4, a3, WSBITS - or a3, a3, a4 # a3 = xxxwww1yyxxxwww1yy + ssr a0 # holds WB + slli a0, a3, WSBITS + or a3, a3, a0 # a3 = xxxwww1yyxxxwww1yy srl a3, a3 # a3 = 00xxxwww1yyxxxwww1 /* We are done if there are no more than the current register frame. */ extui a3, a3, 1, WSBITS-1 # a3 = 0yyxxxwww - movi a4, (1 << (WSBITS-1)) + movi a0, (1 << (WSBITS-1)) _beqz a3, .Lnospill # only one active frame? jump /* We want 1 at the top, so that we return to the current windowbase */ - or a3, a3, a4 # 1yyxxxwww + or a3, a3, a0 # 1yyxxxwww /* Skip empty frames - get 'oldest' WINDOWSTART-bit. */ wsr a3, windowstart # save shifted windowstart - neg a4, a3 - and a3, a4, a3 # first bit set from right: 000010000 + neg a0, a3 + and a3, a0, a3 # first bit set from right: 000010000 - ffs_ws a4, a3 # a4: shifts to skip empty frames + ffs_ws a0, a3 # a0: shifts to skip empty frames movi a3, WSBITS - sub a4, a3, a4 # WSBITS-a4:number of 0-bits from right - ssr a4 # save in SAR for later. + sub a0, a3, a0 # WSBITS-a0:number of 0-bits from right + ssr a0 # save in SAR for later. rsr a3, windowbase - add a3, a3, a4 + add a3, a3, a0 wsr a3, windowbase rsync @@ -1352,22 +1142,6 @@ ENTRY(_spill_registers) * we have to save 4,8. or 12 registers. */ - _bbsi.l a3, 1, .Lc4 - _bbsi.l a3, 2, .Lc8 - - /* Special case: we have a call12-frame starting at a4. */ - - _bbci.l a3, 3, .Lc12 # bit 3 shouldn't be zero! (Jump to Lc12 first) - - s32e a4, a1, -16 # a1 is valid with an empty spill area - l32e a4, a5, -12 - s32e a8, a4, -48 - mov a8, a4 - l32e a4, a1, -16 - j .Lc12c - -.Lnospill: - ret .Lloop: _bbsi.l a3, 1, .Lc4 _bbci.l a3, 2, .Lc12 @@ -1381,20 +1155,10 @@ ENTRY(_spill_registers) s32e a9, a4, -28 s32e a10, a4, -24 s32e a11, a4, -20 - srli a11, a3, 2 # shift windowbase by 2 rotw 2 _bnei a3, 1, .Lloop - -.Lexit: /* Done. Do the final rotation, set WS, and return. */ - - rotw 1 - rsr a3, windowbase - ssl a3 - movi a3, 1 - sll a3, a3 - wsr a3, windowstart - ret + j .Lexit .Lc4: s32e a4, a9, -16 s32e a5, a9, -12 @@ -1410,11 +1174,11 @@ ENTRY(_spill_registers) /* 12-register frame (call12) */ - l32e a2, a5, -12 - s32e a8, a2, -48 - mov a8, a2 + l32e a0, a5, -12 + s32e a8, a0, -48 + mov a8, a0 -.Lc12c: s32e a9, a8, -44 + s32e a9, a8, -44 s32e a10, a8, -40 s32e a11, a8, -36 s32e a12, a8, -32 @@ -1434,30 +1198,54 @@ ENTRY(_spill_registers) */ rotw 1 - mov a5, a13 + mov a4, a13 rotw -1 - s32e a4, a9, -16 - s32e a5, a9, -12 - s32e a6, a9, -8 - s32e a7, a9, -4 + s32e a4, a8, -16 + s32e a5, a8, -12 + s32e a6, a8, -8 + s32e a7, a8, -4 rotw 3 _beqi a3, 1, .Lexit j .Lloop -.Linvalid_mask: +.Lexit: - /* We get here because of an unrecoverable error in the window - * registers. If we are in user space, we kill the application, - * however, this condition is unrecoverable in kernel space. - */ + /* Done. Do the final rotation and set WS */ - rsr a0, ps - _bbci.l a0, PS_UM_BIT, 1f + rotw 1 + rsr a3, windowbase + ssl a3 + movi a3, 1 + sll a3, a3 + wsr a3, windowstart +.Lnospill: + + /* Advance PC, restore registers and SAR, and return from exception. */ + + l32i a3, a2, PT_SAR + l32i a0, a2, PT_AREG0 + wsr a3, sar + l32i a3, a2, PT_AREG3 + + /* Restore clobbered registers. */ + + l32i a4, a2, PT_AREG4 + l32i a7, a2, PT_AREG7 + l32i a8, a2, PT_AREG8 + l32i a11, a2, PT_AREG11 + l32i a12, a2, PT_AREG12 + l32i a15, a2, PT_AREG15 + + movi a2, 0 + rfe + +.Linvalid_mask: - /* User space: Setup a dummy frame and kill application. 
+ /* We get here because of an unrecoverable error in the window + * registers, so set up a dummy frame and kill the user application. * Note: We assume EXC_TABLE_KSTK contains a valid stack pointer. */ @@ -1470,11 +1258,10 @@ ENTRY(_spill_registers) movi a0, 0 - movi a3, exc_table + rsr a3, excsave1 l32i a1, a3, EXC_TABLE_KSTK - wsr a3, excsave1 - movi a4, (1 << PS_WOE_BIT) | 1 + movi a4, (1 << PS_WOE_BIT) | LOCKLEVEL wsr a4, ps rsync @@ -1482,14 +1269,136 @@ ENTRY(_spill_registers) movi a4, do_exit callx4 a4 -1: /* Kernel space: PANIC! */ + /* shouldn't return, so panic */ wsr a0, excsave1 movi a0, unrecoverable_exception callx0 a0 # should not return 1: j 1b -ENDPROC(_spill_registers) + +ENDPROC(fast_syscall_spill_registers) + +/* Fixup handler. + * + * We get here if the spill routine causes an exception, e.g. tlb miss. + * We basically restore WINDOWBASE and WINDOWSTART to the condition when + * we entered the spill routine and jump to the user exception handler. + * + * Note that we only need to restore the bits in windowstart that have not + * been spilled yet by the _spill_register routine. Luckily, a3 contains a + * rotated windowstart with only those bits set for frames that haven't been + * spilled yet. Because a3 is rotated such that bit 0 represents the register + * frame for the current windowbase - 1, we need to rotate a3 left by the + * value of the current windowbase + 1 and move it to windowstart. + * + * a0: value of depc, original value in depc + * a2: trashed, original value in EXC_TABLE_DOUBLE_SAVE + * a3: exctable, original value in excsave1 + */ + +ENTRY(fast_syscall_spill_registers_fixup) + + rsr a2, windowbase # get current windowbase (a2 is saved) + xsr a0, depc # restore depc and a0 + ssl a2 # set shift (32 - WB) + + /* We need to make sure the current registers (a0-a3) are preserved. + * To do this, we simply set the bit for the current window frame + * in WS, so that the exception handlers save them to the task stack. + * + * Note: we use a3 to set the windowbase, so we take a special care + * of it, saving it in the original _spill_registers frame across + * the exception handler call. + */ + + xsr a3, excsave1 # get spill-mask + slli a3, a3, 1 # shift left by one + addi a3, a3, 1 # set the bit for the current window frame + + slli a2, a3, 32-WSBITS + src a2, a3, a2 # a2 = xxwww1yyxxxwww1yy...... + wsr a2, windowstart # set corrected windowstart + + srli a3, a3, 1 + rsr a2, excsave1 + l32i a2, a2, EXC_TABLE_DOUBLE_SAVE # restore a2 + xsr a2, excsave1 + s32i a3, a2, EXC_TABLE_DOUBLE_SAVE # save a3 + l32i a3, a2, EXC_TABLE_PARAM # original WB (in user task) + xsr a2, excsave1 + + /* Return to the original (user task) WINDOWBASE. + * We leave the following frame behind: + * a0, a1, a2 same + * a3: trashed (saved in EXC_TABLE_DOUBLE_SAVE) + * depc: depc (we have to return to that address) + * excsave_1: exctable + */ + + wsr a3, windowbase + rsync + + /* We are now in the original frame when we entered _spill_registers: + * a0: return address + * a1: used, stack pointer + * a2: kernel stack pointer + * a3: available + * depc: exception address + * excsave: exctable + * Note: This frame might be the same as above. + */ + + /* Setup stack pointer. */ + + addi a2, a2, -PT_USER_SIZE + s32i a0, a2, PT_AREG0 + + /* Make sure we return to this fixup handler. */ + + movi a3, fast_syscall_spill_registers_fixup_return + s32i a3, a2, PT_DEPC # setup depc + + /* Jump to the exception handler. 
*/ + + rsr a3, excsave1 + rsr a0, exccause + addx4 a0, a0, a3 # find entry in table + l32i a0, a0, EXC_TABLE_FAST_USER # load handler + l32i a3, a3, EXC_TABLE_DOUBLE_SAVE + jx a0 + +ENDPROC(fast_syscall_spill_registers_fixup) + +ENTRY(fast_syscall_spill_registers_fixup_return) + + /* When we return here, all registers have been restored (a2: DEPC) */ + + wsr a2, depc # exception address + + /* Restore fixup handler. */ + + rsr a2, excsave1 + s32i a3, a2, EXC_TABLE_DOUBLE_SAVE + movi a3, fast_syscall_spill_registers_fixup + s32i a3, a2, EXC_TABLE_FIXUP + rsr a3, windowbase + s32i a3, a2, EXC_TABLE_PARAM + l32i a2, a2, EXC_TABLE_KSTK + + /* Load WB at the time the exception occurred. */ + + rsr a3, sar # WB is still in SAR + neg a3, a3 + wsr a3, windowbase + rsync + + rsr a3, excsave1 + l32i a3, a3, EXC_TABLE_DOUBLE_SAVE + + rfde + +ENDPROC(fast_syscall_spill_registers_fixup_return) #ifdef CONFIG_MMU /* @@ -1516,9 +1425,9 @@ ENDPROC(fast_second_level_miss_double_kernel) * a0: trashed, original value saved on stack (PT_AREG0) * a1: a1 * a2: new stack pointer, original in DEPC - * a3: dispatch table + * a3: a3 * depc: a2, original value saved on stack (PT_DEPC) - * excsave_1: a3 + * excsave_1: dispatch table * * PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC * < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception @@ -1526,9 +1435,10 @@ ENDPROC(fast_second_level_miss_double_kernel) ENTRY(fast_second_level_miss) - /* Save a1. Note: we don't expect a double exception. */ + /* Save a1 and a3. Note: we don't expect a double exception. */ s32i a1, a2, PT_AREG1 + s32i a3, a2, PT_AREG3 /* We need to map the page of PTEs for the user task. Find * the pointer to that page. Also, it's possible for tsk->mm @@ -1550,9 +1460,6 @@ ENTRY(fast_second_level_miss) l32i a0, a1, TASK_MM # tsk->mm beqz a0, 9f - - /* We deliberately destroy a3 that holds the exception table. */ - 8: rsr a3, excvaddr # fault address _PGD_OFFSET(a0, a3, a1) l32i a0, a0, 0 # read pmdval @@ -1603,7 +1510,7 @@ ENTRY(fast_second_level_miss) /* Exit critical section. */ -4: movi a3, exc_table # restore a3 +4: rsr a3, excsave1 movi a0, 0 s32i a0, a3, EXC_TABLE_FIXUP @@ -1611,8 +1518,8 @@ ENTRY(fast_second_level_miss) l32i a0, a2, PT_AREG0 l32i a1, a2, PT_AREG1 + l32i a3, a2, PT_AREG3 l32i a2, a2, PT_DEPC - xsr a3, excsave1 bgeui a2, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f @@ -1699,11 +1606,8 @@ ENTRY(fast_second_level_miss) 2: /* Invalid PGD, default exception handling */ - movi a3, exc_table rsr a1, depc - xsr a3, excsave1 s32i a1, a2, PT_AREG2 - s32i a3, a2, PT_AREG3 mov a1, a2 rsr a2, ps @@ -1723,9 +1627,9 @@ ENDPROC(fast_second_level_miss) * a0: trashed, original value saved on stack (PT_AREG0) * a1: a1 * a2: new stack pointer, original in DEPC - * a3: dispatch table + * a3: a3 * depc: a2, original value saved on stack (PT_DEPC) - * excsave_1: a3 + * excsave_1: dispatch table * * PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC * < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception @@ -1733,53 +1637,56 @@ ENDPROC(fast_second_level_miss) ENTRY(fast_store_prohibited) - /* Save a1 and a4. */ + /* Save a1 and a3. 
*/ s32i a1, a2, PT_AREG1 - s32i a4, a2, PT_AREG4 + s32i a3, a2, PT_AREG3 GET_CURRENT(a1,a2) l32i a0, a1, TASK_MM # tsk->mm beqz a0, 9f 8: rsr a1, excvaddr # fault address - _PGD_OFFSET(a0, a1, a4) + _PGD_OFFSET(a0, a1, a3) l32i a0, a0, 0 beqz a0, 2f - /* Note that we assume _PAGE_WRITABLE_BIT is only set if pte is valid.*/ + /* + * Note that we test _PAGE_WRITABLE_BIT only if PTE is present + * and is not PAGE_NONE. See pgtable.h for possible PTE layouts. + */ - _PTE_OFFSET(a0, a1, a4) - l32i a4, a0, 0 # read pteval - bbci.l a4, _PAGE_WRITABLE_BIT, 2f + _PTE_OFFSET(a0, a1, a3) + l32i a3, a0, 0 # read pteval + movi a1, _PAGE_CA_INVALID + ball a3, a1, 2f + bbci.l a3, _PAGE_WRITABLE_BIT, 2f movi a1, _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_HW_WRITE - or a4, a4, a1 + or a3, a3, a1 rsr a1, excvaddr - s32i a4, a0, 0 + s32i a3, a0, 0 /* We need to flush the cache if we have page coloring. */ #if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK dhwb a0, 0 #endif pdtlb a0, a1 - wdtlb a4, a0 + wdtlb a3, a0 /* Exit critical section. */ movi a0, 0 + rsr a3, excsave1 s32i a0, a3, EXC_TABLE_FIXUP /* Restore the working registers, and return. */ - l32i a4, a2, PT_AREG4 + l32i a3, a2, PT_AREG3 l32i a1, a2, PT_AREG1 l32i a0, a2, PT_AREG0 l32i a2, a2, PT_DEPC - /* Restore excsave1 and a3. */ - - xsr a3, excsave1 bgeui a2, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f rsr a2, depc @@ -1796,11 +1703,8 @@ ENTRY(fast_store_prohibited) 2: /* If there was a problem, handle fault in C */ - rsr a4, depc # still holds a2 - xsr a3, excsave1 - s32i a4, a2, PT_AREG2 - s32i a3, a2, PT_AREG3 - l32i a4, a2, PT_AREG4 + rsr a3, depc # still holds a2 + s32i a3, a2, PT_AREG2 mov a1, a2 rsr a2, ps @@ -1867,6 +1771,43 @@ ENTRY(system_call) ENDPROC(system_call) +/* + * Spill live registers on the kernel stack macro. + * + * Entry condition: ps.woe is set, ps.excm is cleared + * Exit condition: windowstart has single bit set + * May clobber: a12, a13 + */ + .macro spill_registers_kernel + +#if XCHAL_NUM_AREGS > 16 + call12 1f + _j 2f + retw + .align 4 +1: + _entry a1, 48 + addi a12, a0, 3 +#if XCHAL_NUM_AREGS > 32 + .rept (XCHAL_NUM_AREGS - 32) / 12 + _entry a1, 48 + mov a12, a0 + .endr +#endif + _entry a1, 48 +#if XCHAL_NUM_AREGS % 12 == 0 + mov a8, a8 +#elif XCHAL_NUM_AREGS % 12 == 4 + mov a12, a12 +#elif XCHAL_NUM_AREGS % 12 == 8 + mov a4, a4 +#endif + retw +2: +#else + mov a12, a12 +#endif + .endm /* * Task switch. @@ -1879,21 +1820,20 @@ ENTRY(_switch_to) entry a1, 16 - mov a12, a2 # preserve 'prev' (a2) - mov a13, a3 # and 'next' (a3) + mov a10, a2 # preserve 'prev' (a2) + mov a11, a3 # and 'next' (a3) l32i a4, a2, TASK_THREAD_INFO l32i a5, a3, TASK_THREAD_INFO - save_xtregs_user a4 a6 a8 a9 a10 a11 THREAD_XTREGS_USER + save_xtregs_user a4 a6 a8 a9 a12 a13 THREAD_XTREGS_USER - s32i a0, a12, THREAD_RA # save return address - s32i a1, a12, THREAD_SP # save stack pointer + s32i a0, a10, THREAD_RA # save return address + s32i a1, a10, THREAD_SP # save stack pointer /* Disable ints while we manipulate the stack pointer. */ - movi a14, (1 << PS_EXCM_BIT) | LOCKLEVEL - xsr a14, ps + rsil a14, LOCKLEVEL rsr a3, excsave1 rsync s32i a3, a3, EXC_TABLE_FIXUP /* enter critical section */ @@ -1908,7 +1848,7 @@ ENTRY(_switch_to) /* Flush register file. */ - call0 _spill_registers # destroys a3, a4, and SAR + spill_registers_kernel /* Set kernel stack (and leave critical section) * Note: It's save to set it here. 
The stack will not be overwritten @@ -1922,15 +1862,15 @@ ENTRY(_switch_to) s32i a6, a3, EXC_TABLE_FIXUP s32i a7, a3, EXC_TABLE_KSTK - /* restore context of the task that 'next' addresses */ + /* restore context of the task 'next' */ - l32i a0, a13, THREAD_RA # restore return address - l32i a1, a13, THREAD_SP # restore stack pointer + l32i a0, a11, THREAD_RA # restore return address + l32i a1, a11, THREAD_SP # restore stack pointer - load_xtregs_user a5 a6 a8 a9 a10 a11 THREAD_XTREGS_USER + load_xtregs_user a5 a6 a8 a9 a12 a13 THREAD_XTREGS_USER wsr a14, ps - mov a2, a12 # return 'prev' + mov a2, a10 # return 'prev' rsync retw diff --git a/arch/xtensa/kernel/head.S b/arch/xtensa/kernel/head.S index 91d9095284d..aeeb3cc8a41 100644 --- a/arch/xtensa/kernel/head.S +++ b/arch/xtensa/kernel/head.S @@ -7,7 +7,7 @@ * License. See the file "COPYING" in the main directory of this archive * for more details. * - * Copyright (C) 2001 - 2005 Tensilica Inc. + * Copyright (C) 2001 - 2008 Tensilica Inc. * * Chris Zankel <chris@zankel.net> * Marc Gauthier <marc@tensilica.com, marc@alumni.uwaterloo.ca> @@ -19,6 +19,7 @@ #include <asm/page.h> #include <asm/cacheasm.h> #include <asm/initialize_mmu.h> +#include <asm/mxregs.h> #include <linux/init.h> #include <linux/linkage.h> @@ -48,36 +49,64 @@ */ __HEAD + .begin no-absolute-literals + ENTRY(_start) - _j 2f - .align 4 -1: .word _startup -2: l32r a0, 1b - jx a0 + /* Preserve the pointer to the boot parameter list in EXCSAVE_1 */ + wsr a2, excsave1 + _j _SetupOCD -ENDPROC(_start) + .align 4 + .literal_position +.Lstartup: + .word _startup - .section .init.text, "ax" + .align 4 +_SetupOCD: + /* + * Initialize WB, WS, and clear PS.EXCM (to allow loop instructions). + * Set Interrupt Level just below XCHAL_DEBUGLEVEL to allow + * xt-gdb to single step via DEBUG exceptions received directly + * by ocd. + */ + movi a1, 1 + movi a0, 0 + wsr a1, windowstart + wsr a0, windowbase + rsync -ENTRY(_startup) + movi a1, LOCKLEVEL + wsr a1, ps + rsync - /* Disable interrupts and exceptions. */ + .global _SetupMMU +_SetupMMU: + Offset = _SetupMMU - _start - movi a0, LOCKLEVEL - wsr a0, ps +#ifdef CONFIG_INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX + initialize_mmu +#if defined(CONFIG_MMU) && XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY + rsr a2, excsave1 + movi a3, 0x08000000 + bgeu a2, a3, 1f + movi a3, 0xd0000000 + add a2, a2, a3 + wsr a2, excsave1 +1: +#endif +#endif + .end no-absolute-literals - /* Preserve the pointer to the boot parameter list in EXCSAVE_1 */ + l32r a0, .Lstartup + jx a0 - wsr a2, excsave1 +ENDPROC(_start) - /* Start with a fresh windowbase and windowstart. */ + __REF + .literal_position - movi a1, 1 - movi a0, 0 - wsr a1, windowstart - wsr a0, windowbase - rsync +ENTRY(_startup) /* Set a0 to 0 for the remaining initialization. */ @@ -86,7 +115,9 @@ ENTRY(_startup) /* Clear debugging registers. */ #if XCHAL_HAVE_DEBUG +#if XCHAL_NUM_IBREAK > 0 wsr a0, ibreakenable +#endif wsr a0, icount movi a1, 15 wsr a0, icountlevel @@ -128,17 +159,6 @@ ENTRY(_startup) wsr a0, cpenable #endif - /* Set PS.INTLEVEL=1, PS.WOE=0, kernel stack, PS.EXCM=0 - * - * Note: PS.EXCM must be cleared before using any loop - * instructions; otherwise, they are silently disabled, and - * at most one iteration of the loop is executed. - */ - - movi a1, 1 - wsr a1, ps - rsync - /* Initialize the caches. * a2, a3 are just working registers (clobbered). 
*/ @@ -156,7 +176,36 @@ ENTRY(_startup) isync - initialize_mmu +#ifdef CONFIG_HAVE_SMP + movi a2, CCON # MX External Register to Configure Cache + movi a3, 1 + wer a3, a2 +#endif + + /* Setup stack and enable window exceptions (keep irqs disabled) */ + + movi a1, start_info + l32i a1, a1, 0 + + movi a2, (1 << PS_WOE_BIT) | LOCKLEVEL + # WOE=1, INTLEVEL=LOCKLEVEL, UM=0 + wsr a2, ps # (enable reg-windows; progmode stack) + rsync + + /* Set up EXCSAVE[DEBUGLEVEL] to point to the Debug Exception Handler.*/ + + movi a2, debug_exception + wsr a2, SREG_EXCSAVE + XCHAL_DEBUGLEVEL + +#ifdef CONFIG_SMP + /* + * Note that with SMP we assume that the cores implement + * the PRID register. + */ + rsr a2, prid + bnez a2, .Lboot_secondary + +#endif /* CONFIG_SMP */ /* Unpack data sections * @@ -205,24 +254,12 @@ ENTRY(_startup) ___flush_dcache_all a2 a3 #endif + memw + isync + ___invalidate_icache_all a2 a3 + isync - /* Setup stack and enable window exceptions (keep irqs disabled) */ - - movi a1, init_thread_union - addi a1, a1, KERNEL_STACK_SIZE - - movi a2, 0x00040001 # WOE=1, INTLEVEL=1, UM=0 - wsr a2, ps # (enable reg-windows; progmode stack) - rsync - - /* Set up EXCSAVE[DEBUGLEVEL] to point to the Debug Exception Handler.*/ - - movi a2, debug_exception - wsr a2, SREG_EXCSAVE + XCHAL_DEBUGLEVEL - - /* Set up EXCSAVE[1] to point to the exc_table. */ - - movi a6, exc_table + movi a6, 0 xsr a6, excsave1 /* init_arch kick-starts the linux kernel */ @@ -236,8 +273,93 @@ ENTRY(_startup) should_never_return: j should_never_return +#ifdef CONFIG_SMP +.Lboot_secondary: + + movi a2, cpu_start_ccount +1: + l32i a3, a2, 0 + beqi a3, 0, 1b + movi a3, 0 + s32i a3, a2, 0 + memw +1: + l32i a3, a2, 0 + beqi a3, 0, 1b + wsr a3, ccount + movi a3, 0 + s32i a3, a2, 0 + memw + + movi a6, 0 + wsr a6, excsave1 + + movi a4, secondary_start_kernel + callx4 a4 + j should_never_return + +#endif /* CONFIG_SMP */ + ENDPROC(_startup) +#ifdef CONFIG_HOTPLUG_CPU + +ENTRY(cpu_restart) + +#if XCHAL_DCACHE_IS_WRITEBACK + ___flush_invalidate_dcache_all a2 a3 +#else + ___invalidate_dcache_all a2 a3 +#endif + memw + movi a2, CCON # MX External Register to Configure Cache + movi a3, 0 + wer a3, a2 + extw + + rsr a0, prid + neg a2, a0 + movi a3, cpu_start_id + s32i a2, a3, 0 +#if XCHAL_DCACHE_IS_WRITEBACK + dhwbi a3, 0 +#endif +1: + l32i a2, a3, 0 + dhi a3, 0 + bne a2, a0, 1b + + /* + * Initialize WB, WS, and clear PS.EXCM (to allow loop instructions). + * Set Interrupt Level just below XCHAL_DEBUGLEVEL to allow + * xt-gdb to single step via DEBUG exceptions received directly + * by ocd. + */ + movi a1, 1 + movi a0, 0 + wsr a1, windowstart + wsr a0, windowbase + rsync + + movi a1, LOCKLEVEL + wsr a1, ps + rsync + + j _startup + +ENDPROC(cpu_restart) + +#endif /* CONFIG_HOTPLUG_CPU */ + +/* + * DATA section + */ + + .section ".data.init.refok" + .align 4 +ENTRY(start_info) + .long init_thread_union + KERNEL_STACK_SIZE + /* * BSS section */ diff --git a/arch/xtensa/kernel/irq.c b/arch/xtensa/kernel/irq.c index 6f4f9749cff..3eee94f621e 100644 --- a/arch/xtensa/kernel/irq.c +++ b/arch/xtensa/kernel/irq.c @@ -4,7 +4,7 @@ * Xtensa built-in interrupt controller and some generic functions copied * from i386. * - * Copyright (C) 2002 - 2006 Tensilica, Inc. + * Copyright (C) 2002 - 2013 Tensilica, Inc. 
* Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar * * @@ -18,36 +18,27 @@ #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/kernel_stat.h> +#include <linux/irqchip.h> +#include <linux/irqchip/xtensa-mx.h> +#include <linux/irqchip/xtensa-pic.h> #include <linux/irqdomain.h> #include <linux/of.h> +#include <asm/mxregs.h> #include <asm/uaccess.h> #include <asm/platform.h> -static unsigned int cached_irq_mask; - atomic_t irq_err_count; -static struct irq_domain *root_domain; - -/* - * do_IRQ handles all normal device IRQ's (the special - * SMP cross-CPU interrupts have their own specific - * handlers). - */ - asmlinkage void do_IRQ(int hwirq, struct pt_regs *regs) { - struct pt_regs *old_regs = set_irq_regs(regs); - int irq = irq_find_mapping(root_domain, hwirq); + int irq = irq_find_mapping(NULL, hwirq); if (hwirq >= NR_IRQS) { printk(KERN_EMERG "%s: cannot handle IRQ %d\n", __func__, hwirq); } - irq_enter(); - #ifdef CONFIG_DEBUG_STACKOVERFLOW /* Debugging check for stack overflow: is there less than 1KB free? */ { @@ -62,95 +53,69 @@ asmlinkage void do_IRQ(int hwirq, struct pt_regs *regs) } #endif generic_handle_irq(irq); - - irq_exit(); - set_irq_regs(old_regs); } int arch_show_interrupts(struct seq_file *p, int prec) { +#ifdef CONFIG_SMP + show_ipi_list(p, prec); +#endif seq_printf(p, "%*s: ", prec, "ERR"); seq_printf(p, "%10u\n", atomic_read(&irq_err_count)); return 0; } -static void xtensa_irq_mask(struct irq_data *d) -{ - cached_irq_mask &= ~(1 << d->hwirq); - set_sr (cached_irq_mask, intenable); -} - -static void xtensa_irq_unmask(struct irq_data *d) -{ - cached_irq_mask |= 1 << d->hwirq; - set_sr (cached_irq_mask, intenable); -} - -static void xtensa_irq_enable(struct irq_data *d) -{ - variant_irq_enable(d->hwirq); - xtensa_irq_unmask(d); -} - -static void xtensa_irq_disable(struct irq_data *d) -{ - xtensa_irq_mask(d); - variant_irq_disable(d->hwirq); -} - -static void xtensa_irq_ack(struct irq_data *d) -{ - set_sr(1 << d->hwirq, intclear); -} - -static int xtensa_irq_retrigger(struct irq_data *d) +int xtensa_irq_domain_xlate(const u32 *intspec, unsigned int intsize, + unsigned long int_irq, unsigned long ext_irq, + unsigned long *out_hwirq, unsigned int *out_type) { - set_sr(1 << d->hwirq, intset); - return 1; + if (WARN_ON(intsize < 1 || intsize > 2)) + return -EINVAL; + if (intsize == 2 && intspec[1] == 1) { + int_irq = xtensa_map_ext_irq(ext_irq); + if (int_irq < XCHAL_NUM_INTERRUPTS) + *out_hwirq = int_irq; + else + return -EINVAL; + } else { + *out_hwirq = int_irq; + } + *out_type = IRQ_TYPE_NONE; + return 0; } -static struct irq_chip xtensa_irq_chip = { - .name = "xtensa", - .irq_enable = xtensa_irq_enable, - .irq_disable = xtensa_irq_disable, - .irq_mask = xtensa_irq_mask, - .irq_unmask = xtensa_irq_unmask, - .irq_ack = xtensa_irq_ack, - .irq_retrigger = xtensa_irq_retrigger, -}; - -static int xtensa_irq_map(struct irq_domain *d, unsigned int irq, +int xtensa_irq_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw) { + struct irq_chip *irq_chip = d->host_data; u32 mask = 1 << hw; if (mask & XCHAL_INTTYPE_MASK_SOFTWARE) { - irq_set_chip_and_handler_name(irq, &xtensa_irq_chip, + irq_set_chip_and_handler_name(irq, irq_chip, handle_simple_irq, "level"); irq_set_status_flags(irq, IRQ_LEVEL); } else if (mask & XCHAL_INTTYPE_MASK_EXTERN_EDGE) { - irq_set_chip_and_handler_name(irq, &xtensa_irq_chip, + irq_set_chip_and_handler_name(irq, irq_chip, handle_edge_irq, "edge"); irq_clear_status_flags(irq, IRQ_LEVEL); } else if (mask & 
XCHAL_INTTYPE_MASK_EXTERN_LEVEL) { - irq_set_chip_and_handler_name(irq, &xtensa_irq_chip, + irq_set_chip_and_handler_name(irq, irq_chip, handle_level_irq, "level"); irq_set_status_flags(irq, IRQ_LEVEL); } else if (mask & XCHAL_INTTYPE_MASK_TIMER) { - irq_set_chip_and_handler_name(irq, &xtensa_irq_chip, - handle_edge_irq, "edge"); + irq_set_chip_and_handler_name(irq, irq_chip, + handle_percpu_irq, "timer"); irq_clear_status_flags(irq, IRQ_LEVEL); } else {/* XCHAL_INTTYPE_MASK_WRITE_ERROR */ /* XCHAL_INTTYPE_MASK_NMI */ - - irq_set_chip_and_handler_name(irq, &xtensa_irq_chip, + irq_set_chip_and_handler_name(irq, irq_chip, handle_level_irq, "level"); irq_set_status_flags(irq, IRQ_LEVEL); } return 0; } -static unsigned map_ext_irq(unsigned ext_irq) +unsigned xtensa_map_ext_irq(unsigned ext_irq) { unsigned mask = XCHAL_INTTYPE_MASK_EXTERN_EDGE | XCHAL_INTTYPE_MASK_EXTERN_LEVEL; @@ -163,55 +128,61 @@ static unsigned map_ext_irq(unsigned ext_irq) return XCHAL_NUM_INTERRUPTS; } -/* - * Device Tree IRQ specifier translation function which works with one or - * two cell bindings. First cell value maps directly to the hwirq number. - * Second cell if present specifies whether hwirq number is external (1) or - * internal (0). - */ -int xtensa_irq_domain_xlate(struct irq_domain *d, struct device_node *ctrlr, - const u32 *intspec, unsigned int intsize, - unsigned long *out_hwirq, unsigned int *out_type) +unsigned xtensa_get_ext_irq_no(unsigned irq) { - if (WARN_ON(intsize < 1 || intsize > 2)) - return -EINVAL; - if (intsize == 2 && intspec[1] == 1) { - unsigned int_irq = map_ext_irq(intspec[0]); - if (int_irq < XCHAL_NUM_INTERRUPTS) - *out_hwirq = int_irq; - else - return -EINVAL; - } else { - *out_hwirq = intspec[0]; - } - *out_type = IRQ_TYPE_NONE; - return 0; + unsigned mask = (XCHAL_INTTYPE_MASK_EXTERN_EDGE | + XCHAL_INTTYPE_MASK_EXTERN_LEVEL) & + ((1u << irq) - 1); + return hweight32(mask); } -static const struct irq_domain_ops xtensa_irq_domain_ops = { - .xlate = xtensa_irq_domain_xlate, - .map = xtensa_irq_map, -}; - void __init init_IRQ(void) { - struct device_node *intc = NULL; - - cached_irq_mask = 0; - set_sr(~0, intclear); - #ifdef CONFIG_OF - /* The interrupt controller device node is mandatory */ - intc = of_find_compatible_node(NULL, NULL, "xtensa,pic"); - BUG_ON(!intc); - - root_domain = irq_domain_add_linear(intc, NR_IRQS, - &xtensa_irq_domain_ops, NULL); + irqchip_init(); +#else +#ifdef CONFIG_HAVE_SMP + xtensa_mx_init_legacy(NULL); #else - root_domain = irq_domain_add_legacy(intc, NR_IRQS, 0, 0, - &xtensa_irq_domain_ops, NULL); + xtensa_pic_init_legacy(NULL); +#endif #endif - irq_set_default_host(root_domain); +#ifdef CONFIG_SMP + ipi_init(); +#endif variant_init_irq(); } + +#ifdef CONFIG_HOTPLUG_CPU +/* + * The CPU has been marked offline. Migrate IRQs off this CPU. If + * the affinity settings do not allow other CPUs, force them onto any + * available CPU. 
+ */ +void migrate_irqs(void) +{ + unsigned int i, cpu = smp_processor_id(); + + for_each_active_irq(i) { + struct irq_data *data = irq_get_irq_data(i); + unsigned int newcpu; + + if (irqd_is_per_cpu(data)) + continue; + + if (!cpumask_test_cpu(cpu, data->affinity)) + continue; + + newcpu = cpumask_any_and(data->affinity, cpu_online_mask); + + if (newcpu >= nr_cpu_ids) { + pr_info_ratelimited("IRQ%u no longer affine to CPU%u\n", + i, cpu); + + cpumask_setall(data->affinity); + } + irq_set_affinity(i, data->affinity); + } +} +#endif /* CONFIG_HOTPLUG_CPU */ diff --git a/arch/xtensa/kernel/mcount.S b/arch/xtensa/kernel/mcount.S new file mode 100644 index 00000000000..0eeda2e4a25 --- /dev/null +++ b/arch/xtensa/kernel/mcount.S @@ -0,0 +1,50 @@ +/* + * arch/xtensa/kernel/mcount.S + * + * Xtensa specific mcount support + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2013 Tensilica Inc. + */ + +#include <linux/linkage.h> +#include <asm/ftrace.h> + +/* + * Entry condition: + * + * a2: a0 of the caller + */ + +ENTRY(_mcount) + + entry a1, 16 + + movi a4, ftrace_trace_function + l32i a4, a4, 0 + movi a3, ftrace_stub + bne a3, a4, 1f + retw + +1: xor a7, a2, a1 + movi a3, 0x3fffffff + and a7, a7, a3 + xor a7, a7, a1 + + xor a6, a0, a1 + and a6, a6, a3 + xor a6, a6, a1 + addi a6, a6, -MCOUNT_INSN_SIZE + callx4 a4 + + retw + +ENDPROC(_mcount) + +ENTRY(ftrace_stub) + entry a1, 16 + retw +ENDPROC(ftrace_stub) diff --git a/arch/xtensa/kernel/mxhead.S b/arch/xtensa/kernel/mxhead.S new file mode 100644 index 00000000000..77a161a112c --- /dev/null +++ b/arch/xtensa/kernel/mxhead.S @@ -0,0 +1,85 @@ +/* + * Xtensa Secondary Processors startup code. + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2001 - 2013 Tensilica Inc. + * + * Joe Taylor <joe@tensilica.com> + * Chris Zankel <chris@zankel.net> + * Marc Gauthier <marc@tensilica.com, marc@alumni.uwaterloo.ca> + * Pete Delaney <piet@tensilica.com> + */ + +#include <linux/linkage.h> + +#include <asm/cacheasm.h> +#include <asm/initialize_mmu.h> +#include <asm/mxregs.h> +#include <asm/regs.h> + + + .section .SecondaryResetVector.text, "ax" + + +ENTRY(_SecondaryResetVector) + _j _SetupOCD + + .begin no-absolute-literals + .literal_position + +_SetupOCD: + /* + * Initialize WB, WS, and clear PS.EXCM (to allow loop instructions). + * Set Interrupt Level just below XCHAL_DEBUGLEVEL to allow + * xt-gdb to single step via DEBUG exceptions received directly + * by ocd. + */ + movi a1, 1 + movi a0, 0 + wsr a1, windowstart + wsr a0, windowbase + rsync + + movi a1, LOCKLEVEL + wsr a1, ps + rsync + +_SetupMMU: + Offset = _SetupMMU - _SecondaryResetVector + +#ifdef CONFIG_INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX + initialize_mmu +#endif + + /* + * Start Secondary Processors with NULL pointer to boot params. + */ + movi a2, 0 # a2 == NULL + movi a3, _startup + jx a3 + + .end no-absolute-literals + + + .section .SecondaryResetVector.remapped_text, "ax" + .global _RemappedSecondaryResetVector + + .org 0 # Need to do org before literals + +_RemappedSecondaryResetVector: + .begin no-absolute-literals + .literal_position + + _j _RemappedSetupMMU + . 
= _RemappedSecondaryResetVector + Offset + +_RemappedSetupMMU: + +#ifdef CONFIG_INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX + initialize_mmu +#endif + + .end no-absolute-literals diff --git a/arch/xtensa/kernel/pci.c b/arch/xtensa/kernel/pci.c index 126c1883940..5b3403388d7 100644 --- a/arch/xtensa/kernel/pci.c +++ b/arch/xtensa/kernel/pci.c @@ -77,9 +77,9 @@ pcibios_align_resource(void *data, const struct resource *res, if (res->flags & IORESOURCE_IO) { if (size > 0x100) { - printk(KERN_ERR "PCI: I/O Region %s/%d too large" - " (%ld bytes)\n", pci_name(dev), - dev->resource - res, size); + pr_err("PCI: I/O Region %s/%d too large (%u bytes)\n", + pci_name(dev), dev->resource - res, + size); } if (start & 0x300) @@ -174,7 +174,7 @@ static int __init pcibios_init(void) struct pci_controller *pci_ctrl; struct list_head resources; struct pci_bus *bus; - int next_busno = 0, i; + int next_busno = 0; printk("PCI: Probing PCI hardware\n"); @@ -197,7 +197,7 @@ static int __init pcibios_init(void) subsys_initcall(pcibios_init); -void __init pcibios_fixup_bus(struct pci_bus *bus) +void pcibios_fixup_bus(struct pci_bus *bus) { if (bus->parent) { /* This is a subordinate bridge */ diff --git a/arch/xtensa/kernel/platform.c b/arch/xtensa/kernel/platform.c index 44bf21c3769..1cf008284dd 100644 --- a/arch/xtensa/kernel/platform.c +++ b/arch/xtensa/kernel/platform.c @@ -29,18 +29,18 @@ */ _F(void, setup, (char** cmd), { }); -_F(void, init_irq, (void), { }); _F(void, restart, (void), { while(1); }); _F(void, halt, (void), { while(1); }); _F(void, power_off, (void), { while(1); }); _F(void, idle, (void), { __asm__ __volatile__ ("waiti 0" ::: "memory"); }); _F(void, heartbeat, (void), { }); _F(int, pcibios_fixup, (void), { return 0; }); +_F(void, pcibios_init, (void), { }); #ifdef CONFIG_XTENSA_CALIBRATE_CCOUNT _F(void, calibrate_ccount, (void), { pr_err("ERROR: Cannot calibrate cpu frequency! Assuming 10MHz.\n"); - ccount_per_jiffy = 10 * (1000000UL/HZ); + ccount_freq = 10 * 1000000UL; }); #endif diff --git a/arch/xtensa/kernel/process.c b/arch/xtensa/kernel/process.c index 0dd5784416d..1c85323f01d 100644 --- a/arch/xtensa/kernel/process.c +++ b/arch/xtensa/kernel/process.c @@ -105,19 +105,9 @@ void coprocessor_flush_all(struct thread_info *ti) /* * Powermanagement idle function, if any is provided by the platform. */ - -void cpu_idle(void) +void arch_cpu_idle(void) { - local_irq_enable(); - - /* endless idle loop with no priority at all */ - while (1) { - rcu_idle_enter(); - while (!need_resched()) - platform_idle(); - rcu_idle_exit(); - schedule_preempt_disabled(); - } + platform_idle(); } /* @@ -259,9 +249,10 @@ int copy_thread(unsigned long clone_flags, unsigned long usp_thread_fn, memcpy(&childregs->areg[XCHAL_NUM_AREGS - len/4], ®s->areg[XCHAL_NUM_AREGS - len/4], len); } -// FIXME: we need to set THREADPTR in thread_info... 
+ + /* The thread pointer is passed in the '4th argument' (= a5) */ if (clone_flags & CLONE_SETTLS) - childregs->areg[2] = childregs->areg[6]; + childregs->threadptr = childregs->areg[5]; } else { p->thread.ra = MAKE_RA_FOR_CALL( (unsigned long)ret_from_kernel_thread, 1); diff --git a/arch/xtensa/kernel/ptrace.c b/arch/xtensa/kernel/ptrace.c index 61fb2e9e903..562fac66475 100644 --- a/arch/xtensa/kernel/ptrace.c +++ b/arch/xtensa/kernel/ptrace.c @@ -53,9 +53,8 @@ int ptrace_getregs(struct task_struct *child, void __user *uregs) { struct pt_regs *regs = task_pt_regs(child); xtensa_gregset_t __user *gregset = uregs; - unsigned long wm = regs->wmask; unsigned long wb = regs->windowbase; - int live, i; + int i; if (!access_ok(VERIFY_WRITE, uregs, sizeof(xtensa_gregset_t))) return -EIO; @@ -67,13 +66,11 @@ int ptrace_getregs(struct task_struct *child, void __user *uregs) __put_user(regs->lcount, &gregset->lcount); __put_user(regs->windowstart, &gregset->windowstart); __put_user(regs->windowbase, &gregset->windowbase); + __put_user(regs->threadptr, &gregset->threadptr); - live = (wm & 2) ? 4 : (wm & 4) ? 8 : (wm & 8) ? 12 : 16; - - for (i = 0; i < live; i++) - __put_user(regs->areg[i],gregset->a+((wb*4+i)%XCHAL_NUM_AREGS)); - for (i = XCHAL_NUM_AREGS - (wm >> 4) * 4; i < XCHAL_NUM_AREGS; i++) - __put_user(regs->areg[i],gregset->a+((wb*4+i)%XCHAL_NUM_AREGS)); + for (i = 0; i < XCHAL_NUM_AREGS; i++) + __put_user(regs->areg[i], + gregset->a + ((wb * 4 + i) % XCHAL_NUM_AREGS)); return 0; } @@ -84,7 +81,7 @@ int ptrace_setregs(struct task_struct *child, void __user *uregs) xtensa_gregset_t *gregset = uregs; const unsigned long ps_mask = PS_CALLINC_MASK | PS_OWB_MASK; unsigned long ps; - unsigned long wb; + unsigned long wb, ws; if (!access_ok(VERIFY_WRITE, uregs, sizeof(xtensa_gregset_t))) return -EIO; @@ -94,21 +91,33 @@ int ptrace_setregs(struct task_struct *child, void __user *uregs) __get_user(regs->lbeg, &gregset->lbeg); __get_user(regs->lend, &gregset->lend); __get_user(regs->lcount, &gregset->lcount); - __get_user(regs->windowstart, &gregset->windowstart); + __get_user(ws, &gregset->windowstart); __get_user(wb, &gregset->windowbase); + __get_user(regs->threadptr, &gregset->threadptr); regs->ps = (regs->ps & ~ps_mask) | (ps & ps_mask) | (1 << PS_EXCM_BIT); if (wb >= XCHAL_NUM_AREGS / 4) return -EFAULT; - regs->windowbase = wb; + if (wb != regs->windowbase || ws != regs->windowstart) { + unsigned long rotws, wmask; + + rotws = (((ws | (ws << WSBITS)) >> wb) & + ((1 << WSBITS) - 1)) & ~1; + wmask = ((rotws ? 
WSBITS + 1 - ffs(rotws) : 0) << 4) | + (rotws & 0xF) | 1; + regs->windowbase = wb; + regs->windowstart = ws; + regs->wmask = wmask; + } if (wb != 0 && __copy_from_user(regs->areg + XCHAL_NUM_AREGS - wb * 4, - gregset->a, wb * 16)) + gregset->a, wb * 16)) return -EFAULT; - if (__copy_from_user(regs->areg, gregset->a + wb*4, (WSBITS-wb) * 16)) + if (__copy_from_user(regs->areg, gregset->a + wb * 4, + (WSBITS - wb) * 16)) return -EFAULT; return 0; diff --git a/arch/xtensa/kernel/setup.c b/arch/xtensa/kernel/setup.c index 24c1a57abb4..06370ccea9e 100644 --- a/arch/xtensa/kernel/setup.c +++ b/arch/xtensa/kernel/setup.c @@ -21,11 +21,11 @@ #include <linux/screen_info.h> #include <linux/bootmem.h> #include <linux/kernel.h> - -#ifdef CONFIG_OF +#include <linux/percpu.h> +#include <linux/clk-provider.h> +#include <linux/cpu.h> #include <linux/of_fdt.h> #include <linux/of_platform.h> -#endif #if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE) # include <linux/console.h> @@ -40,6 +40,7 @@ #endif #include <asm/bootparam.h> +#include <asm/mmu_context.h> #include <asm/pgtable.h> #include <asm/processor.h> #include <asm/timex.h> @@ -48,6 +49,8 @@ #include <asm/setup.h> #include <asm/param.h> #include <asm/traps.h> +#include <asm/smp.h> +#include <asm/sysmem.h> #include <platform/hardware.h> @@ -64,14 +67,13 @@ extern struct rtc_ops no_rtc_ops; struct rtc_ops *rtc_ops; #ifdef CONFIG_BLK_DEV_INITRD -extern void *initrd_start; -extern void *initrd_end; +extern unsigned long initrd_start; +extern unsigned long initrd_end; int initrd_is_mapped = 0; extern int initrd_below_start_ok; #endif #ifdef CONFIG_OF -extern u32 __dtb_start[]; void *dtb_start = __dtb_start; #endif @@ -86,18 +88,6 @@ static char __initdata command_line[COMMAND_LINE_SIZE]; static char default_command_line[COMMAND_LINE_SIZE] __initdata = CONFIG_CMDLINE; #endif -sysmem_info_t __initdata sysmem; - -#ifdef CONFIG_MMU -extern void init_mmu(void); -#else -static inline void init_mmu(void) { } -#endif - -extern int mem_reserve(unsigned long, unsigned long, int); -extern void bootmem_init(void); -extern void zones_init(void); - /* * Boot parameter parsing. 
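The windowstart arithmetic in the ptrace_setregs() hunk above is dense, so here it is as a standalone sketch: windowstart is a WSBITS-wide ring of frame-valid bits, doubling it and shifting right by the new windowbase rotates the tracee's own frame down to bit 0, and wmask then packs the low four frame bits together with a count derived from the highest set bit. WSBITS = 8 and the sample values are assumptions for illustration:

#include <stdio.h>
#include <strings.h>			/* ffs() */

#define WSBITS 8			/* XCHAL_NUM_AREGS / 4 on a 32-areg core */

static unsigned long wmask_for(unsigned long ws, unsigned long wb)
{
	unsigned long rotws, wmask;

	/* rotate windowstart right by windowbase, drop the current frame's bit */
	rotws = (((ws | (ws << WSBITS)) >> wb) & ((1 << WSBITS) - 1)) & ~1;
	/* upper bits: WSBITS + 1 - ffs(rotws); low bits: nearest frames + self */
	wmask = ((rotws ? WSBITS + 1 - ffs(rotws) : 0) << 4) |
		(rotws & 0xF) | 1;
	return wmask;
}

int main(void)
{
	printf("ws=0xa5 wb=5 -> wmask=%#lx\n", wmask_for(0xa5, 5));
	return 0;
}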
* @@ -117,31 +107,14 @@ typedef struct tagtable { /* parse current tag */ -static int __init add_sysmem_bank(unsigned long type, unsigned long start, - unsigned long end) -{ - if (sysmem.nr_banks >= SYSMEM_BANKS_MAX) { - printk(KERN_WARNING - "Ignoring memory bank 0x%08lx size %ldKB\n", - start, end - start); - return -EINVAL; - } - sysmem.bank[sysmem.nr_banks].type = type; - sysmem.bank[sysmem.nr_banks].start = PAGE_ALIGN(start); - sysmem.bank[sysmem.nr_banks].end = end & PAGE_MASK; - sysmem.nr_banks++; - - return 0; -} - static int __init parse_tag_mem(const bp_tag_t *tag) { - meminfo_t *mi = (meminfo_t *)(tag->data); + struct bp_meminfo *mi = (struct bp_meminfo *)(tag->data); if (mi->type != MEMORY_TYPE_CONVENTIONAL) return -1; - return add_sysmem_bank(mi->type, mi->start, mi->end); + return add_sysmem_bank(mi->start, mi->end); } __tagtable(BP_TAG_MEMORY, parse_tag_mem); @@ -150,10 +123,10 @@ __tagtable(BP_TAG_MEMORY, parse_tag_mem); static int __init parse_tag_initrd(const bp_tag_t* tag) { - meminfo_t* mi; - mi = (meminfo_t*)(tag->data); - initrd_start = (void*)(mi->start); - initrd_end = (void*)(mi->end); + struct bp_meminfo *mi = (struct bp_meminfo *)(tag->data); + + initrd_start = (unsigned long)__va(mi->start); + initrd_end = (unsigned long)__va(mi->end); return 0; } @@ -164,20 +137,12 @@ __tagtable(BP_TAG_INITRD, parse_tag_initrd); static int __init parse_tag_fdt(const bp_tag_t *tag) { - dtb_start = (void *)(tag->data[0]); + dtb_start = __va(tag->data[0]); return 0; } __tagtable(BP_TAG_FDT, parse_tag_fdt); -void __init early_init_dt_setup_initrd_arch(unsigned long start, - unsigned long end) -{ - initrd_start = (void *)__va(start); - initrd_end = (void *)__va(end); - initrd_below_start_ok = 1; -} - #endif /* CONFIG_OF */ #endif /* CONFIG_BLK_DEV_INITRD */ @@ -223,11 +188,51 @@ static int __init parse_bootparam(const bp_tag_t* tag) } #ifdef CONFIG_OF +bool __initdata dt_memory_scan = false; + +#if XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY +unsigned long xtensa_kio_paddr = XCHAL_KIO_DEFAULT_PADDR; +EXPORT_SYMBOL(xtensa_kio_paddr); + +static int __init xtensa_dt_io_area(unsigned long node, const char *uname, + int depth, void *data) +{ + const __be32 *ranges; + int len; + + if (depth > 1) + return 0; + + if (!of_flat_dt_is_compatible(node, "simple-bus")) + return 0; + + ranges = of_get_flat_dt_prop(node, "ranges", &len); + if (!ranges) + return 1; + if (len == 0) + return 1; + + xtensa_kio_paddr = of_read_ulong(ranges+1, 1); + /* round down to nearest 256MB boundary */ + xtensa_kio_paddr &= 0xf0000000; + + return 1; +} +#else +static int __init xtensa_dt_io_area(unsigned long node, const char *uname, + int depth, void *data) +{ + return 1; +} +#endif void __init early_init_dt_add_memory_arch(u64 base, u64 size) { + if (!dt_memory_scan) + return; + size &= PAGE_MASK; - add_sysmem_bank(MEMORY_TYPE_CONVENTIONAL, base, base + size); + add_sysmem_bank(base, base + size); } void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align) @@ -237,36 +242,20 @@ void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align) void __init early_init_devtree(void *params) { - /* Setup flat device-tree pointer */ - initial_boot_params = params; - - /* Retrieve various informations from the /chosen node of the - * device-tree, including the platform type, initrd location and - * size, TCE reserve, and more ... 
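The xtensa_dt_io_area() callback above pulls one cell out of the first simple-bus "ranges" property and, as its comment says, rounds it down to a 256 MB boundary to get xtensa_kio_paddr. A trivial standalone sketch of that rounding; the sample cell value is invented:

#include <stdio.h>

int main(void)
{
	/* e.g. the CPU-address cell of a simple-bus "ranges" entry */
	unsigned long kio_paddr = 0xf0100000;

	/* round down to the nearest 256 MB boundary, as in xtensa_dt_io_area() */
	kio_paddr &= 0xf0000000;

	printf("xtensa_kio_paddr = %#lx\n", kio_paddr);	/* 0xf0000000 */
	return 0;
}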
- */ - if (!command_line[0]) - of_scan_flat_dt(early_init_dt_scan_chosen, command_line); - - /* Scan memory nodes and rebuild MEMBLOCKs */ - of_scan_flat_dt(early_init_dt_scan_root, NULL); if (sysmem.nr_banks == 0) - of_scan_flat_dt(early_init_dt_scan_memory, NULL); -} + dt_memory_scan = true; -static void __init copy_devtree(void) -{ - void *alloc = early_init_dt_alloc_memory_arch( - be32_to_cpu(initial_boot_params->totalsize), 0); - if (alloc) { - memcpy(alloc, initial_boot_params, - be32_to_cpu(initial_boot_params->totalsize)); - initial_boot_params = alloc; - } + early_init_dt_scan(params); + of_scan_flat_dt(xtensa_dt_io_area, NULL); + + if (!command_line[0]) + strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE); } static int __init xtensa_device_probe(void) { - of_platform_populate(NULL, NULL, NULL, NULL); + of_clk_init(NULL); + of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL); return 0; } @@ -280,8 +269,6 @@ device_initcall(xtensa_device_probe); void __init init_arch(bp_tag_t *bp_start) { - sysmem.nr_banks = 0; - /* Parse boot parameters */ if (bp_start) @@ -292,10 +279,9 @@ void __init init_arch(bp_tag_t *bp_start) #endif if (sysmem.nr_banks == 0) { - sysmem.nr_banks = 1; - sysmem.bank[0].start = PLATFORM_DEFAULT_MEM_START; - sysmem.bank[0].end = PLATFORM_DEFAULT_MEM_START - + PLATFORM_DEFAULT_MEM_SIZE; + add_sysmem_bank(PLATFORM_DEFAULT_MEM_START, + PLATFORM_DEFAULT_MEM_START + + PLATFORM_DEFAULT_MEM_SIZE); } #ifdef CONFIG_CMDLINE_BOOL @@ -328,6 +314,27 @@ extern char _UserExceptionVector_literal_start; extern char _UserExceptionVector_text_end; extern char _DoubleExceptionVector_literal_start; extern char _DoubleExceptionVector_text_end; +#if XCHAL_EXCM_LEVEL >= 2 +extern char _Level2InterruptVector_text_start; +extern char _Level2InterruptVector_text_end; +#endif +#if XCHAL_EXCM_LEVEL >= 3 +extern char _Level3InterruptVector_text_start; +extern char _Level3InterruptVector_text_end; +#endif +#if XCHAL_EXCM_LEVEL >= 4 +extern char _Level4InterruptVector_text_start; +extern char _Level4InterruptVector_text_end; +#endif +#if XCHAL_EXCM_LEVEL >= 5 +extern char _Level5InterruptVector_text_start; +extern char _Level5InterruptVector_text_end; +#endif +#if XCHAL_EXCM_LEVEL >= 6 +extern char _Level6InterruptVector_text_start; +extern char _Level6InterruptVector_text_end; +#endif + #ifdef CONFIG_S32C1I_SELFTEST @@ -358,7 +365,8 @@ static inline int probed_compare_swap(int *v, int cmp, int set) /* Handle probed exception */ -void __init do_probed_exception(struct pt_regs *regs, unsigned long exccause) +static void __init do_probed_exception(struct pt_regs *regs, + unsigned long exccause) { if (regs->pc == rcw_probe_pc) { /* exception on s32c1i ? */ regs->pc += 3; /* skip the s32c1i instruction */ @@ -370,7 +378,7 @@ void __init do_probed_exception(struct pt_regs *regs, unsigned long exccause) /* Simple test of S32C1I (soc bringup assist) */ -void __init check_s32c1i(void) +static int __init check_s32c1i(void) { int n, cause1, cause2; void *handbus, *handdata, *handaddr; /* temporarily saved handlers */ @@ -425,24 +433,21 @@ void __init check_s32c1i(void) trap_set_handler(EXCCAUSE_LOAD_STORE_ERROR, handbus); trap_set_handler(EXCCAUSE_LOAD_STORE_DATA_ERROR, handdata); trap_set_handler(EXCCAUSE_LOAD_STORE_ADDR_ERROR, handaddr); + return 0; } #else /* XCHAL_HAVE_S32C1I */ /* This condition should not occur with a commercially deployed processor. 
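The S32C1I self-test above matters because the SMP support added later in this patch requires compare-and-swap (smp.c below refuses to build without it). As a rough illustration of what a working s32c1i enables, here is the classic CAS retry loop; GCC's __sync builtin stands in for the raw instruction, and the helper name is ours:

/* atomic add built purely from compare-and-swap */
static int atomic_add_return_cas(volatile int *v, int inc)
{
	int old, new;

	do {
		old = *v;
		new = old + inc;
		/* __sync_val_compare_and_swap returns the prior value of *v;
		 * the store happened iff that value still equals 'old' */
	} while (__sync_val_compare_and_swap(v, old, new) != old);

	return new;
}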
Display reminder for early engr test or demo chips / FPGA bitstreams */ -void __init check_s32c1i(void) +static int __init check_s32c1i(void) { pr_warn("Processor configuration lacks atomic compare-and-swap support!\n"); + return 0; } #endif /* XCHAL_HAVE_S32C1I */ -#else /* CONFIG_S32C1I_SELFTEST */ - -void __init check_s32c1i(void) -{ -} - +early_initcall(check_s32c1i); #endif /* CONFIG_S32C1I_SELFTEST */ @@ -451,14 +456,12 @@ void __init setup_arch(char **cmdline_p) strlcpy(boot_command_line, command_line, COMMAND_LINE_SIZE); *cmdline_p = command_line; - check_s32c1i(); - /* Reserve some memory regions */ #ifdef CONFIG_BLK_DEV_INITRD if (initrd_start < initrd_end) { initrd_is_mapped = mem_reserve(__pa(initrd_start), - __pa(initrd_end), 0); + __pa(initrd_end), 0) == 0; initrd_below_start_ok = 1; } else { initrd_start = 0; @@ -482,15 +485,38 @@ void __init setup_arch(char **cmdline_p) mem_reserve(__pa(&_DoubleExceptionVector_literal_start), __pa(&_DoubleExceptionVector_text_end), 0); +#if XCHAL_EXCM_LEVEL >= 2 + mem_reserve(__pa(&_Level2InterruptVector_text_start), + __pa(&_Level2InterruptVector_text_end), 0); +#endif +#if XCHAL_EXCM_LEVEL >= 3 + mem_reserve(__pa(&_Level3InterruptVector_text_start), + __pa(&_Level3InterruptVector_text_end), 0); +#endif +#if XCHAL_EXCM_LEVEL >= 4 + mem_reserve(__pa(&_Level4InterruptVector_text_start), + __pa(&_Level4InterruptVector_text_end), 0); +#endif +#if XCHAL_EXCM_LEVEL >= 5 + mem_reserve(__pa(&_Level5InterruptVector_text_start), + __pa(&_Level5InterruptVector_text_end), 0); +#endif +#if XCHAL_EXCM_LEVEL >= 6 + mem_reserve(__pa(&_Level6InterruptVector_text_start), + __pa(&_Level6InterruptVector_text_end), 0); +#endif + + parse_early_param(); bootmem_init(); -#ifdef CONFIG_OF - copy_devtree(); - unflatten_device_tree(); -#endif + unflatten_and_copy_device_tree(); platform_setup(cmdline_p); +#ifdef CONFIG_SMP + smp_init_cpus(); +#endif + paging_init(); zones_init(); @@ -507,6 +533,22 @@ void __init setup_arch(char **cmdline_p) #endif } +static DEFINE_PER_CPU(struct cpu, cpu_data); + +static int __init topology_init(void) +{ + int i; + + for_each_possible_cpu(i) { + struct cpu *cpu = &per_cpu(cpu_data, i); + cpu->hotpluggable = !!i; + register_cpu(cpu, i); + } + + return 0; +} +subsys_initcall(topology_init); + void machine_restart(char * cmd) { platform_restart(); @@ -532,21 +574,27 @@ void machine_power_off(void) static int c_show(struct seq_file *f, void *slot) { + char buf[NR_CPUS * 5]; + + cpulist_scnprintf(buf, sizeof(buf), cpu_online_mask); /* high-level stuff */ - seq_printf(f,"processor\t: 0\n" - "vendor_id\t: Tensilica\n" - "model\t\t: Xtensa " XCHAL_HW_VERSION_NAME "\n" - "core ID\t\t: " XCHAL_CORE_ID "\n" - "build ID\t: 0x%x\n" - "byte order\t: %s\n" - "cpu MHz\t\t: %lu.%02lu\n" - "bogomips\t: %lu.%02lu\n", - XCHAL_BUILD_UNIQUE_ID, - XCHAL_HAVE_BE ? "big" : "little", - CCOUNT_PER_JIFFY/(1000000/HZ), - (CCOUNT_PER_JIFFY/(10000/HZ)) % 100, - loops_per_jiffy/(500000/HZ), - (loops_per_jiffy/(5000/HZ)) % 100); + seq_printf(f, "CPU count\t: %u\n" + "CPU list\t: %s\n" + "vendor_id\t: Tensilica\n" + "model\t\t: Xtensa " XCHAL_HW_VERSION_NAME "\n" + "core ID\t\t: " XCHAL_CORE_ID "\n" + "build ID\t: 0x%x\n" + "byte order\t: %s\n" + "cpu MHz\t\t: %lu.%02lu\n" + "bogomips\t: %lu.%02lu\n", + num_online_cpus(), + buf, + XCHAL_BUILD_UNIQUE_ID, + XCHAL_HAVE_BE ? 
"big" : "little", + ccount_freq/1000000, + (ccount_freq/10000) % 100, + loops_per_jiffy/(500000/HZ), + (loops_per_jiffy/(5000/HZ)) % 100); seq_printf(f,"flags\t\t: " #if XCHAL_HAVE_NMI @@ -658,7 +706,7 @@ c_show(struct seq_file *f, void *slot) static void * c_start(struct seq_file *f, loff_t *pos) { - return (void *) ((*pos == 0) ? (void *)1 : NULL); + return (*pos == 0) ? (void *)1 : NULL; } static void * @@ -674,10 +722,10 @@ c_stop(struct seq_file *f, void *v) const struct seq_operations cpuinfo_op = { - start: c_start, - next: c_next, - stop: c_stop, - show: c_show + .start = c_start, + .next = c_next, + .stop = c_stop, + .show = c_show, }; #endif /* CONFIG_PROC_FS */ diff --git a/arch/xtensa/kernel/signal.c b/arch/xtensa/kernel/signal.c index de34d6be91c..98b67d5f151 100644 --- a/arch/xtensa/kernel/signal.c +++ b/arch/xtensa/kernel/signal.c @@ -265,7 +265,7 @@ asmlinkage long xtensa_rt_sigreturn(long a0, long a1, long a2, long a3, ret = regs->areg[2]; - if (do_sigaltstack(&frame->uc.uc_stack, NULL, regs->areg[1]) == -EFAULT) + if (restore_altstack(&frame->uc.uc_stack)) goto badframe; return ret; @@ -337,11 +337,11 @@ static int setup_frame(int sig, struct k_sigaction *ka, siginfo_t *info, struct rt_sigframe *frame; int err = 0; int signal; - unsigned long sp, ra; + unsigned long sp, ra, tp; sp = regs->areg[1]; - if ((ka->sa.sa_flags & SA_ONSTACK) != 0 && ! on_sig_stack(sp)) { + if ((ka->sa.sa_flags & SA_ONSTACK) != 0 && sas_ss_flags(sp) == 0) { sp = current->sas_ss_sp + current->sas_ss_size; } @@ -368,11 +368,7 @@ static int setup_frame(int sig, struct k_sigaction *ka, siginfo_t *info, err |= __put_user(0, &frame->uc.uc_flags); err |= __put_user(0, &frame->uc.uc_link); - err |= __put_user((void *)current->sas_ss_sp, - &frame->uc.uc_stack.ss_sp); - err |= __put_user(sas_ss_flags(regs->areg[1]), - &frame->uc.uc_stack.ss_flags); - err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size); + err |= __save_altstack(&frame->uc.uc_stack, regs->areg[1]); err |= setup_sigcontext(frame, regs); err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); @@ -395,7 +391,8 @@ static int setup_frame(int sig, struct k_sigaction *ka, siginfo_t *info, * Return context not modified until this point. */ - /* Set up registers for signal handler */ + /* Set up registers for signal handler; preserve the threadptr */ + tp = regs->threadptr; start_thread(regs, (unsigned long) ka->sa.sa_handler, (unsigned long) frame); @@ -406,6 +403,7 @@ static int setup_frame(int sig, struct k_sigaction *ka, siginfo_t *info, regs->areg[6] = (unsigned long) signal; regs->areg[7] = (unsigned long) &frame->info; regs->areg[8] = (unsigned long) &frame->uc; + regs->threadptr = tp; /* Set access mode to USER_DS. Nomenclature is outdated, but * functionality is used in uaccess.h @@ -424,16 +422,6 @@ give_sigsegv: return -EFAULT; } -asmlinkage long xtensa_sigaltstack(const stack_t __user *uss, - stack_t __user *uoss, - long a2, long a3, long a4, long a5, - struct pt_regs *regs) -{ - return do_sigaltstack(uss, uoss, regs->areg[1]); -} - - - /* * Note that 'init' is a special process: it doesn't get signals it doesn't * want to handle. Thus you cannot kill init even with a SIGKILL even by diff --git a/arch/xtensa/kernel/smp.c b/arch/xtensa/kernel/smp.c new file mode 100644 index 00000000000..40b5a3771fb --- /dev/null +++ b/arch/xtensa/kernel/smp.c @@ -0,0 +1,607 @@ +/* + * Xtensa SMP support functions. + * + * This file is subject to the terms and conditions of the GNU General Public + * License. 
See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2008 - 2013 Tensilica Inc. + * + * Chris Zankel <chris@zankel.net> + * Joe Taylor <joe@tensilica.com> + * Pete Delaney <piet@tensilica.com + */ + +#include <linux/cpu.h> +#include <linux/cpumask.h> +#include <linux/delay.h> +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/irqdomain.h> +#include <linux/irq.h> +#include <linux/kdebug.h> +#include <linux/module.h> +#include <linux/reboot.h> +#include <linux/seq_file.h> +#include <linux/smp.h> +#include <linux/thread_info.h> + +#include <asm/cacheflush.h> +#include <asm/kdebug.h> +#include <asm/mmu_context.h> +#include <asm/mxregs.h> +#include <asm/platform.h> +#include <asm/tlbflush.h> +#include <asm/traps.h> + +#ifdef CONFIG_SMP +# if XCHAL_HAVE_S32C1I == 0 +# error "The S32C1I option is required for SMP." +# endif +#endif + +static void system_invalidate_dcache_range(unsigned long start, + unsigned long size); +static void system_flush_invalidate_dcache_range(unsigned long start, + unsigned long size); + +/* IPI (Inter Process Interrupt) */ + +#define IPI_IRQ 0 + +static irqreturn_t ipi_interrupt(int irq, void *dev_id); +static struct irqaction ipi_irqaction = { + .handler = ipi_interrupt, + .flags = IRQF_PERCPU, + .name = "ipi", +}; + +void ipi_init(void) +{ + unsigned irq = irq_create_mapping(NULL, IPI_IRQ); + setup_irq(irq, &ipi_irqaction); +} + +static inline unsigned int get_core_count(void) +{ + /* Bits 18..21 of SYSCFGID contain the core count minus 1. */ + unsigned int syscfgid = get_er(SYSCFGID); + return ((syscfgid >> 18) & 0xf) + 1; +} + +static inline int get_core_id(void) +{ + /* Bits 0...18 of SYSCFGID contain the core id */ + unsigned int core_id = get_er(SYSCFGID); + return core_id & 0x3fff; +} + +void __init smp_prepare_cpus(unsigned int max_cpus) +{ + unsigned i; + + for (i = 0; i < max_cpus; ++i) + set_cpu_present(i, true); +} + +void __init smp_init_cpus(void) +{ + unsigned i; + unsigned int ncpus = get_core_count(); + unsigned int core_id = get_core_id(); + + pr_info("%s: Core Count = %d\n", __func__, ncpus); + pr_info("%s: Core Id = %d\n", __func__, core_id); + + for (i = 0; i < ncpus; ++i) + set_cpu_possible(i, true); +} + +void __init smp_prepare_boot_cpu(void) +{ + unsigned int cpu = smp_processor_id(); + BUG_ON(cpu != 0); + cpu_asid_cache(cpu) = ASID_USER_FIRST; +} + +void __init smp_cpus_done(unsigned int max_cpus) +{ +} + +static int boot_secondary_processors = 1; /* Set with xt-gdb via .xt-gdb */ +static DECLARE_COMPLETION(cpu_running); + +void secondary_start_kernel(void) +{ + struct mm_struct *mm = &init_mm; + unsigned int cpu = smp_processor_id(); + + init_mmu(); + +#ifdef CONFIG_DEBUG_KERNEL + if (boot_secondary_processors == 0) { + pr_debug("%s: boot_secondary_processors:%d; Hanging cpu:%d\n", + __func__, boot_secondary_processors, cpu); + for (;;) + __asm__ __volatile__ ("waiti " __stringify(LOCKLEVEL)); + } + + pr_debug("%s: boot_secondary_processors:%d; Booting cpu:%d\n", + __func__, boot_secondary_processors, cpu); +#endif + /* Init EXCSAVE1 */ + + secondary_trap_init(); + + /* All kernel threads share the same mm context. 
*/ + + atomic_inc(&mm->mm_users); + atomic_inc(&mm->mm_count); + current->active_mm = mm; + cpumask_set_cpu(cpu, mm_cpumask(mm)); + enter_lazy_tlb(mm, current); + + preempt_disable(); + trace_hardirqs_off(); + + calibrate_delay(); + + notify_cpu_starting(cpu); + + secondary_init_irq(); + local_timer_setup(cpu); + + set_cpu_online(cpu, true); + + local_irq_enable(); + + complete(&cpu_running); + + cpu_startup_entry(CPUHP_ONLINE); +} + +static void mx_cpu_start(void *p) +{ + unsigned cpu = (unsigned)p; + unsigned long run_stall_mask = get_er(MPSCORE); + + set_er(run_stall_mask & ~(1u << cpu), MPSCORE); + pr_debug("%s: cpu: %d, run_stall_mask: %lx ---> %lx\n", + __func__, cpu, run_stall_mask, get_er(MPSCORE)); +} + +static void mx_cpu_stop(void *p) +{ + unsigned cpu = (unsigned)p; + unsigned long run_stall_mask = get_er(MPSCORE); + + set_er(run_stall_mask | (1u << cpu), MPSCORE); + pr_debug("%s: cpu: %d, run_stall_mask: %lx ---> %lx\n", + __func__, cpu, run_stall_mask, get_er(MPSCORE)); +} + +#ifdef CONFIG_HOTPLUG_CPU +unsigned long cpu_start_id __cacheline_aligned; +#endif +unsigned long cpu_start_ccount; + +static int boot_secondary(unsigned int cpu, struct task_struct *ts) +{ + unsigned long timeout = jiffies + msecs_to_jiffies(1000); + unsigned long ccount; + int i; + +#ifdef CONFIG_HOTPLUG_CPU + cpu_start_id = cpu; + system_flush_invalidate_dcache_range( + (unsigned long)&cpu_start_id, sizeof(cpu_start_id)); +#endif + smp_call_function_single(0, mx_cpu_start, (void *)cpu, 1); + + for (i = 0; i < 2; ++i) { + do + ccount = get_ccount(); + while (!ccount); + + cpu_start_ccount = ccount; + + while (time_before(jiffies, timeout)) { + mb(); + if (!cpu_start_ccount) + break; + } + + if (cpu_start_ccount) { + smp_call_function_single(0, mx_cpu_stop, + (void *)cpu, 1); + cpu_start_ccount = 0; + return -EIO; + } + } + return 0; +} + +int __cpu_up(unsigned int cpu, struct task_struct *idle) +{ + int ret = 0; + + if (cpu_asid_cache(cpu) == 0) + cpu_asid_cache(cpu) = ASID_USER_FIRST; + + start_info.stack = (unsigned long)task_pt_regs(idle); + wmb(); + + pr_debug("%s: Calling wakeup_secondary(cpu:%d, idle:%p, sp: %08lx)\n", + __func__, cpu, idle, start_info.stack); + + ret = boot_secondary(cpu, idle); + if (ret == 0) { + wait_for_completion_timeout(&cpu_running, + msecs_to_jiffies(1000)); + if (!cpu_online(cpu)) + ret = -EIO; + } + + if (ret) + pr_err("CPU %u failed to boot\n", cpu); + + return ret; +} + +#ifdef CONFIG_HOTPLUG_CPU + +/* + * __cpu_disable runs on the processor to be shutdown. + */ +int __cpu_disable(void) +{ + unsigned int cpu = smp_processor_id(); + + /* + * Take this CPU offline. Once we clear this, we can't return, + * and we must not schedule until we're ready to give up the cpu. + */ + set_cpu_online(cpu, false); + + /* + * OK - migrate IRQs away from this CPU + */ + migrate_irqs(); + + /* + * Flush user cache and TLB mappings, and then remove this CPU + * from the vm mask set of all processes. + */ + local_flush_cache_all(); + local_flush_tlb_all(); + invalidate_page_directory(); + + clear_tasks_mm_cpumask(cpu); + + return 0; +} + +static void platform_cpu_kill(unsigned int cpu) +{ + smp_call_function_single(0, mx_cpu_stop, (void *)cpu, true); +} + +/* + * called on the thread which is asking for a CPU to be shutdown - + * waits until shutdown has completed, or it is timed out. 
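The handshake in boot_secondary() above is a small shared-variable protocol: the boot CPU publishes a nonzero ccount in cpu_start_ccount, lifts the secondary's RunStall bit via mx_cpu_start(), then polls for the secondary's startup code (elsewhere in the series) to zero the variable before a jiffies deadline; on timeout it re-stalls the core and returns -EIO. A reduced userspace model of the poll-with-deadline side; all names and the millisecond bookkeeping are ours:

#include <stdatomic.h>
#include <stdbool.h>
#include <time.h>

static atomic_ulong cpu_start_ccount;

static long now_ms(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000 + ts.tv_nsec / 1000000;
}

/* stand-in for the time_before(jiffies, timeout) loop */
static bool wait_for_ack(unsigned int timeout_ms)
{
	long deadline = now_ms() + timeout_ms;

	for (;;) {
		/* the secondary writes 0 here once it is running */
		if (atomic_load(&cpu_start_ccount) == 0)
			return true;
		if (now_ms() >= deadline)
			return false;	/* caller re-stalls the core: -EIO */
	}
}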
+ */ +void __cpu_die(unsigned int cpu) +{ + unsigned long timeout = jiffies + msecs_to_jiffies(1000); + while (time_before(jiffies, timeout)) { + system_invalidate_dcache_range((unsigned long)&cpu_start_id, + sizeof(cpu_start_id)); + if (cpu_start_id == -cpu) { + platform_cpu_kill(cpu); + return; + } + } + pr_err("CPU%u: unable to kill\n", cpu); +} + +void arch_cpu_idle_dead(void) +{ + cpu_die(); +} +/* + * Called from the idle thread for the CPU which has been shutdown. + * + * Note that we disable IRQs here, but do not re-enable them + * before returning to the caller. This is also the behaviour + * of the other hotplug-cpu capable cores, so presumably coming + * out of idle fixes this. + */ +void __ref cpu_die(void) +{ + idle_task_exit(); + local_irq_disable(); + __asm__ __volatile__( + " movi a2, cpu_restart\n" + " jx a2\n"); +} + +#endif /* CONFIG_HOTPLUG_CPU */ + +enum ipi_msg_type { + IPI_RESCHEDULE = 0, + IPI_CALL_FUNC, + IPI_CPU_STOP, + IPI_MAX +}; + +static const struct { + const char *short_text; + const char *long_text; +} ipi_text[] = { + { .short_text = "RES", .long_text = "Rescheduling interrupts" }, + { .short_text = "CAL", .long_text = "Function call interrupts" }, + { .short_text = "DIE", .long_text = "CPU shutdown interrupts" }, +}; + +struct ipi_data { + unsigned long ipi_count[IPI_MAX]; +}; + +static DEFINE_PER_CPU(struct ipi_data, ipi_data); + +static void send_ipi_message(const struct cpumask *callmask, + enum ipi_msg_type msg_id) +{ + int index; + unsigned long mask = 0; + + for_each_cpu(index, callmask) + if (index != smp_processor_id()) + mask |= 1 << index; + + set_er(mask, MIPISET(msg_id)); +} + +void arch_send_call_function_ipi_mask(const struct cpumask *mask) +{ + send_ipi_message(mask, IPI_CALL_FUNC); +} + +void arch_send_call_function_single_ipi(int cpu) +{ + send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC); +} + +void smp_send_reschedule(int cpu) +{ + send_ipi_message(cpumask_of(cpu), IPI_RESCHEDULE); +} + +void smp_send_stop(void) +{ + struct cpumask targets; + + cpumask_copy(&targets, cpu_online_mask); + cpumask_clear_cpu(smp_processor_id(), &targets); + send_ipi_message(&targets, IPI_CPU_STOP); +} + +static void ipi_cpu_stop(unsigned int cpu) +{ + set_cpu_online(cpu, false); + machine_halt(); +} + +irqreturn_t ipi_interrupt(int irq, void *dev_id) +{ + unsigned int cpu = smp_processor_id(); + struct ipi_data *ipi = &per_cpu(ipi_data, cpu); + unsigned int msg; + unsigned i; + + msg = get_er(MIPICAUSE(cpu)); + for (i = 0; i < IPI_MAX; i++) + if (msg & (1 << i)) { + set_er(1 << i, MIPICAUSE(cpu)); + ++ipi->ipi_count[i]; + } + + if (msg & (1 << IPI_RESCHEDULE)) + scheduler_ipi(); + if (msg & (1 << IPI_CALL_FUNC)) + generic_smp_call_function_interrupt(); + if (msg & (1 << IPI_CPU_STOP)) + ipi_cpu_stop(cpu); + + return IRQ_HANDLED; +} + +void show_ipi_list(struct seq_file *p, int prec) +{ + unsigned int cpu; + unsigned i; + + for (i = 0; i < IPI_MAX; ++i) { + seq_printf(p, "%*s:", prec, ipi_text[i].short_text); + for_each_online_cpu(cpu) + seq_printf(p, " %10lu", + per_cpu(ipi_data, cpu).ipi_count[i]); + seq_printf(p, " %s\n", ipi_text[i].long_text); + } +} + +int setup_profiling_timer(unsigned int multiplier) +{ + pr_debug("setup_profiling_timer %d\n", multiplier); + return 0; +} + +/* TLB flush functions */ + +struct flush_data { + struct vm_area_struct *vma; + unsigned long addr1; + unsigned long addr2; +}; + +static void ipi_flush_tlb_all(void *arg) +{ + local_flush_tlb_all(); +} + +void flush_tlb_all(void) +{ + on_each_cpu(ipi_flush_tlb_all, NULL, 1); +} + 
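flush_tlb_all() just above sets the template that every remaining SMP flush below follows: wrap the CPU-local operation in a void-pointer callback, bundle any arguments in a small struct, and run it everywhere with on_each_cpu(). A generic standalone model of the pattern; the "flush" here just prints, and all names are ours:

#include <stdio.h>

struct flush_data {
	unsigned long addr1;
	unsigned long addr2;
};

/* the CPU-local primitive */
static void local_flush_range(unsigned long start, unsigned long end)
{
	printf("flushing %#lx..%#lx on this cpu\n", start, end);
}

/* void-pointer trampoline, shaped like the ipi_* helpers in the patch */
static void ipi_flush_range(void *arg)
{
	struct flush_data *fd = arg;

	local_flush_range(fd->addr1, fd->addr2);
}

/* stand-in for on_each_cpu(): run the callback "on every cpu" */
static void on_each_cpu(void (*fn)(void *), void *arg, int wait)
{
	(void)wait;
	fn(arg);		/* the real kernel also IPIs the other cpus */
}

int main(void)
{
	struct flush_data fd = { .addr1 = 0x1000, .addr2 = 0x2000 };

	on_each_cpu(ipi_flush_range, &fd, 1);
	return 0;
}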
+static void ipi_flush_tlb_mm(void *arg) +{ + local_flush_tlb_mm(arg); +} + +void flush_tlb_mm(struct mm_struct *mm) +{ + on_each_cpu(ipi_flush_tlb_mm, mm, 1); +} + +static void ipi_flush_tlb_page(void *arg) +{ + struct flush_data *fd = arg; + local_flush_tlb_page(fd->vma, fd->addr1); +} + +void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr) +{ + struct flush_data fd = { + .vma = vma, + .addr1 = addr, + }; + on_each_cpu(ipi_flush_tlb_page, &fd, 1); +} + +static void ipi_flush_tlb_range(void *arg) +{ + struct flush_data *fd = arg; + local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2); +} + +void flush_tlb_range(struct vm_area_struct *vma, + unsigned long start, unsigned long end) +{ + struct flush_data fd = { + .vma = vma, + .addr1 = start, + .addr2 = end, + }; + on_each_cpu(ipi_flush_tlb_range, &fd, 1); +} + +static void ipi_flush_tlb_kernel_range(void *arg) +{ + struct flush_data *fd = arg; + local_flush_tlb_kernel_range(fd->addr1, fd->addr2); +} + +void flush_tlb_kernel_range(unsigned long start, unsigned long end) +{ + struct flush_data fd = { + .addr1 = start, + .addr2 = end, + }; + on_each_cpu(ipi_flush_tlb_kernel_range, &fd, 1); +} + +/* Cache flush functions */ + +static void ipi_flush_cache_all(void *arg) +{ + local_flush_cache_all(); +} + +void flush_cache_all(void) +{ + on_each_cpu(ipi_flush_cache_all, NULL, 1); +} + +static void ipi_flush_cache_page(void *arg) +{ + struct flush_data *fd = arg; + local_flush_cache_page(fd->vma, fd->addr1, fd->addr2); +} + +void flush_cache_page(struct vm_area_struct *vma, + unsigned long address, unsigned long pfn) +{ + struct flush_data fd = { + .vma = vma, + .addr1 = address, + .addr2 = pfn, + }; + on_each_cpu(ipi_flush_cache_page, &fd, 1); +} + +static void ipi_flush_cache_range(void *arg) +{ + struct flush_data *fd = arg; + local_flush_cache_range(fd->vma, fd->addr1, fd->addr2); +} + +void flush_cache_range(struct vm_area_struct *vma, + unsigned long start, unsigned long end) +{ + struct flush_data fd = { + .vma = vma, + .addr1 = start, + .addr2 = end, + }; + on_each_cpu(ipi_flush_cache_range, &fd, 1); +} + +static void ipi_flush_icache_range(void *arg) +{ + struct flush_data *fd = arg; + local_flush_icache_range(fd->addr1, fd->addr2); +} + +void flush_icache_range(unsigned long start, unsigned long end) +{ + struct flush_data fd = { + .addr1 = start, + .addr2 = end, + }; + on_each_cpu(ipi_flush_icache_range, &fd, 1); +} + +/* ------------------------------------------------------------------------- */ + +static void ipi_invalidate_dcache_range(void *arg) +{ + struct flush_data *fd = arg; + __invalidate_dcache_range(fd->addr1, fd->addr2); +} + +static void system_invalidate_dcache_range(unsigned long start, + unsigned long size) +{ + struct flush_data fd = { + .addr1 = start, + .addr2 = size, + }; + on_each_cpu(ipi_invalidate_dcache_range, &fd, 1); +} + +static void ipi_flush_invalidate_dcache_range(void *arg) +{ + struct flush_data *fd = arg; + __flush_invalidate_dcache_range(fd->addr1, fd->addr2); +} + +static void system_flush_invalidate_dcache_range(unsigned long start, + unsigned long size) +{ + struct flush_data fd = { + .addr1 = start, + .addr2 = size, + }; + on_each_cpu(ipi_flush_invalidate_dcache_range, &fd, 1); +} diff --git a/arch/xtensa/kernel/stacktrace.c b/arch/xtensa/kernel/stacktrace.c new file mode 100644 index 00000000000..7d2c317bd98 --- /dev/null +++ b/arch/xtensa/kernel/stacktrace.c @@ -0,0 +1,120 @@ +/* + * arch/xtensa/kernel/stacktrace.c + * + * This file is subject to the terms and conditions of the 
GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2001 - 2013 Tensilica Inc. + */ +#include <linux/export.h> +#include <linux/sched.h> +#include <linux/stacktrace.h> + +#include <asm/stacktrace.h> +#include <asm/traps.h> + +void walk_stackframe(unsigned long *sp, + int (*fn)(struct stackframe *frame, void *data), + void *data) +{ + unsigned long a0, a1; + unsigned long sp_end; + + a1 = (unsigned long)sp; + sp_end = ALIGN(a1, THREAD_SIZE); + + spill_registers(); + + while (a1 < sp_end) { + struct stackframe frame; + + sp = (unsigned long *)a1; + + a0 = *(sp - 4); + a1 = *(sp - 3); + + if (a1 <= (unsigned long)sp) + break; + + frame.pc = MAKE_PC_FROM_RA(a0, a1); + frame.sp = a1; + + if (fn(&frame, data)) + return; + } +} + +#ifdef CONFIG_STACKTRACE + +struct stack_trace_data { + struct stack_trace *trace; + unsigned skip; +}; + +static int stack_trace_cb(struct stackframe *frame, void *data) +{ + struct stack_trace_data *trace_data = data; + struct stack_trace *trace = trace_data->trace; + + if (trace_data->skip) { + --trace_data->skip; + return 0; + } + if (!kernel_text_address(frame->pc)) + return 0; + + trace->entries[trace->nr_entries++] = frame->pc; + return trace->nr_entries >= trace->max_entries; +} + +void save_stack_trace_tsk(struct task_struct *task, struct stack_trace *trace) +{ + struct stack_trace_data trace_data = { + .trace = trace, + .skip = trace->skip, + }; + walk_stackframe(stack_pointer(task), stack_trace_cb, &trace_data); +} +EXPORT_SYMBOL_GPL(save_stack_trace_tsk); + +void save_stack_trace(struct stack_trace *trace) +{ + save_stack_trace_tsk(current, trace); +} +EXPORT_SYMBOL_GPL(save_stack_trace); + +#endif + +#ifdef CONFIG_FRAME_POINTER + +struct return_addr_data { + unsigned long addr; + unsigned skip; +}; + +static int return_address_cb(struct stackframe *frame, void *data) +{ + struct return_addr_data *r = data; + + if (r->skip) { + --r->skip; + return 0; + } + if (!kernel_text_address(frame->pc)) + return 0; + r->addr = frame->pc; + return 1; +} + +unsigned long return_address(unsigned level) +{ + struct return_addr_data r = { + .skip = level + 1, + }; + walk_stackframe(stack_pointer(NULL), return_address_cb, &r); + return r.addr; +} +EXPORT_SYMBOL(return_address); + +#endif diff --git a/arch/xtensa/kernel/syscall.c b/arch/xtensa/kernel/syscall.c index 54fa8425cee..5d3f7a119ed 100644 --- a/arch/xtensa/kernel/syscall.c +++ b/arch/xtensa/kernel/syscall.c @@ -36,6 +36,10 @@ syscall_t sys_call_table[__NR_syscall_count] /* FIXME __cacheline_aligned */= { #include <uapi/asm/unistd.h> }; +#define COLOUR_ALIGN(addr, pgoff) \ + ((((addr) + SHMLBA - 1) & ~(SHMLBA - 1)) + \ + (((pgoff) << PAGE_SHIFT) & (SHMLBA - 1))) + asmlinkage long xtensa_shmat(int shmid, char __user *shmaddr, int shmflg) { unsigned long ret; @@ -52,3 +56,40 @@ asmlinkage long xtensa_fadvise64_64(int fd, int advice, { return sys_fadvise64_64(fd, offset, len, advice); } + +unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, + unsigned long len, unsigned long pgoff, unsigned long flags) +{ + struct vm_area_struct *vmm; + + if (flags & MAP_FIXED) { + /* We do not accept a shared mapping if it would violate + * cache aliasing constraints. 
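COLOUR_ALIGN() above is the heart of xtensa's arch_get_unmapped_area(): with a virtually-indexed cache, two mappings of the same page must agree in the address bits below SHMLBA, so a candidate address is rounded up to an SHMLBA boundary and then offset by the file-offset colour. A worked standalone version; SHMLBA and the sample numbers are assumptions, PAGE_SHIFT = 12:

#include <stdio.h>

#define PAGE_SHIFT 12
#define SHMLBA     (4 << PAGE_SHIFT)	/* assumed: 4-page cache colour span */

#define COLOUR_ALIGN(addr, pgoff) \
	((((addr) + SHMLBA - 1) & ~(SHMLBA - 1)) + \
	 (((pgoff) << PAGE_SHIFT) & (SHMLBA - 1)))

int main(void)
{
	unsigned long addr  = 0x20003000;
	unsigned long pgoff = 5;	/* colour: page 1 of the 4-page span */

	printf("aligned: %#lx\n", COLOUR_ALIGN(addr, pgoff));
	/* rounds up to 0x20004000, then + 0x1000 colour = 0x20005000 */
	return 0;
}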
+ */ + if ((flags & MAP_SHARED) && + ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))) + return -EINVAL; + return addr; + } + + if (len > TASK_SIZE) + return -ENOMEM; + if (!addr) + addr = TASK_UNMAPPED_BASE; + + if (flags & MAP_SHARED) + addr = COLOUR_ALIGN(addr, pgoff); + else + addr = PAGE_ALIGN(addr); + + for (vmm = find_vma(current->mm, addr); ; vmm = vmm->vm_next) { + /* At this point: (!vmm || addr < vmm->vm_end). */ + if (TASK_SIZE - len < addr) + return -ENOMEM; + if (!vmm || addr + len <= vmm->vm_start) + return addr; + addr = vmm->vm_end; + if (flags & MAP_SHARED) + addr = COLOUR_ALIGN(addr, pgoff); + } +} diff --git a/arch/xtensa/kernel/time.c b/arch/xtensa/kernel/time.c index ffb47410431..2a1823de69c 100644 --- a/arch/xtensa/kernel/time.c +++ b/arch/xtensa/kernel/time.c @@ -16,6 +16,7 @@ #include <linux/sched.h> #include <linux/time.h> #include <linux/clocksource.h> +#include <linux/clockchips.h> #include <linux/interrupt.h> #include <linux/module.h> #include <linux/init.h> @@ -23,95 +24,155 @@ #include <linux/profile.h> #include <linux/delay.h> #include <linux/irqdomain.h> +#include <linux/sched_clock.h> #include <asm/timex.h> #include <asm/platform.h> -#ifdef CONFIG_XTENSA_CALIBRATE_CCOUNT -unsigned long ccount_per_jiffy; /* per 1/HZ */ -unsigned long nsec_per_ccount; /* nsec per ccount increment */ -#endif +unsigned long ccount_freq; /* ccount Hz */ +EXPORT_SYMBOL(ccount_freq); static cycle_t ccount_read(struct clocksource *cs) { return (cycle_t)get_ccount(); } +static u64 notrace ccount_sched_clock_read(void) +{ + return get_ccount(); +} + static struct clocksource ccount_clocksource = { .name = "ccount", .rating = 200, .read = ccount_read, .mask = CLOCKSOURCE_MASK(32), + .flags = CLOCK_SOURCE_IS_CONTINUOUS, +}; + +static int ccount_timer_set_next_event(unsigned long delta, + struct clock_event_device *dev); +static void ccount_timer_set_mode(enum clock_event_mode mode, + struct clock_event_device *evt); +struct ccount_timer { + struct clock_event_device evt; + int irq_enabled; + char name[24]; }; +static DEFINE_PER_CPU(struct ccount_timer, ccount_timer); + +static int ccount_timer_set_next_event(unsigned long delta, + struct clock_event_device *dev) +{ + unsigned long flags, next; + int ret = 0; + + local_irq_save(flags); + next = get_ccount() + delta; + set_linux_timer(next); + if (next - get_ccount() > delta) + ret = -ETIME; + local_irq_restore(flags); + + return ret; +} + +static void ccount_timer_set_mode(enum clock_event_mode mode, + struct clock_event_device *evt) +{ + struct ccount_timer *timer = + container_of(evt, struct ccount_timer, evt); + + /* + * There is no way to disable the timer interrupt at the device level, + * only at the intenable register itself. Since enable_irq/disable_irq + * calls are nested, we need to make sure that these calls are + * balanced. 
+ */ + switch (mode) { + case CLOCK_EVT_MODE_SHUTDOWN: + case CLOCK_EVT_MODE_UNUSED: + if (timer->irq_enabled) { + disable_irq(evt->irq); + timer->irq_enabled = 0; + } + break; + case CLOCK_EVT_MODE_RESUME: + case CLOCK_EVT_MODE_ONESHOT: + if (!timer->irq_enabled) { + enable_irq(evt->irq); + timer->irq_enabled = 1; + } + default: + break; + } +} static irqreturn_t timer_interrupt(int irq, void *dev_id); static struct irqaction timer_irqaction = { .handler = timer_interrupt, - .flags = IRQF_DISABLED, + .flags = IRQF_TIMER, .name = "timer", }; +void local_timer_setup(unsigned cpu) +{ + struct ccount_timer *timer = &per_cpu(ccount_timer, cpu); + struct clock_event_device *clockevent = &timer->evt; + + timer->irq_enabled = 1; + clockevent->name = timer->name; + snprintf(timer->name, sizeof(timer->name), "ccount_clockevent_%u", cpu); + clockevent->features = CLOCK_EVT_FEAT_ONESHOT; + clockevent->rating = 300; + clockevent->set_next_event = ccount_timer_set_next_event; + clockevent->set_mode = ccount_timer_set_mode; + clockevent->cpumask = cpumask_of(cpu); + clockevent->irq = irq_create_mapping(NULL, LINUX_TIMER_INT); + if (WARN(!clockevent->irq, "error: can't map timer irq")) + return; + clockevents_config_and_register(clockevent, ccount_freq, + 0xf, 0xffffffff); +} + void __init time_init(void) { - unsigned int irq; #ifdef CONFIG_XTENSA_CALIBRATE_CCOUNT printk("Calibrating CPU frequency "); platform_calibrate_ccount(); - printk("%d.%02d MHz\n", (int)ccount_per_jiffy/(1000000/HZ), - (int)(ccount_per_jiffy/(10000/HZ))%100); + printk("%d.%02d MHz\n", (int)ccount_freq/1000000, + (int)(ccount_freq/10000)%100); +#else + ccount_freq = CONFIG_XTENSA_CPU_CLOCK*1000000UL; #endif - clocksource_register_hz(&ccount_clocksource, CCOUNT_PER_JIFFY * HZ); - - /* Initialize the linux timer interrupt. */ - - irq = irq_create_mapping(NULL, LINUX_TIMER_INT); - setup_irq(irq, &timer_irqaction); - set_linux_timer(get_ccount() + CCOUNT_PER_JIFFY); + clocksource_register_hz(&ccount_clocksource, ccount_freq); + local_timer_setup(0); + setup_irq(this_cpu_ptr(&ccount_timer)->evt.irq, &timer_irqaction); + sched_clock_register(ccount_sched_clock_read, 32, ccount_freq); + clocksource_of_init(); } /* * The timer interrupt is called HZ times per second. */ -irqreturn_t timer_interrupt (int irq, void *dev_id) +irqreturn_t timer_interrupt(int irq, void *dev_id) { + struct clock_event_device *evt = &this_cpu_ptr(&ccount_timer)->evt; - unsigned long next; - - next = get_linux_timer(); - -again: - while ((signed long)(get_ccount() - next) > 0) { - - profile_tick(CPU_PROFILING); -#ifndef CONFIG_SMP - update_process_times(user_mode(get_irq_regs())); -#endif - - xtime_update(1); /* Linux handler in kernel/time/timekeeping */ - - /* Note that writing CCOMPARE clears the interrupt. */ - - next += CCOUNT_PER_JIFFY; - set_linux_timer(next); - } + set_linux_timer(get_linux_timer()); + evt->event_handler(evt); /* Allow platform to do something useful (Wdog). */ - platform_heartbeat(); - /* Make sure we didn't miss any tick... */ - - if ((signed long)(get_ccount() - next) > 0) - goto again; - return IRQ_HANDLED; } #ifndef CONFIG_GENERIC_CALIBRATE_DELAY -void __cpuinit calibrate_delay(void) +void calibrate_delay(void) { - loops_per_jiffy = CCOUNT_PER_JIFFY; + loops_per_jiffy = ccount_freq / HZ; printk("Calibrating delay loop (skipped)... 
" "%lu.%02lu BogoMIPS preset\n", loops_per_jiffy/(1000000/HZ), diff --git a/arch/xtensa/kernel/traps.c b/arch/xtensa/kernel/traps.c index 01e0111bf78..eebbfd8c26f 100644 --- a/arch/xtensa/kernel/traps.c +++ b/arch/xtensa/kernel/traps.c @@ -11,7 +11,7 @@ * * Essentially rewritten for the Xtensa architecture port. * - * Copyright (C) 2001 - 2005 Tensilica Inc. + * Copyright (C) 2001 - 2013 Tensilica Inc. * * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com> * Chris Zankel <chris@zankel.net> @@ -32,11 +32,13 @@ #include <linux/delay.h> #include <linux/hardirq.h> +#include <asm/stacktrace.h> #include <asm/ptrace.h> #include <asm/timex.h> #include <asm/uaccess.h> #include <asm/pgtable.h> #include <asm/processor.h> +#include <asm/traps.h> #ifdef CONFIG_KGDB extern int gdb_enter; @@ -155,7 +157,7 @@ COPROCESSOR(7), * 2. it is a temporary memory buffer for the exception handlers. */ -unsigned long exc_table[EXC_TABLE_SIZE/4]; +DEFINE_PER_CPU(unsigned long, exc_table[EXC_TABLE_SIZE/4]); void die(const char*, struct pt_regs*, long); @@ -193,30 +195,48 @@ void do_multihit(struct pt_regs *regs, unsigned long exccause) } /* - * Level-1 interrupt. - * We currently have no priority encoding. + * IRQ handler. */ -unsigned long ignored_level1_interrupts; extern void do_IRQ(int, struct pt_regs *); -void do_interrupt (struct pt_regs *regs) +void do_interrupt(struct pt_regs *regs) { - unsigned long intread = get_sr (interrupt); - unsigned long intenable = get_sr (intenable); - int i, mask; + static const unsigned int_level_mask[] = { + 0, + XCHAL_INTLEVEL1_MASK, + XCHAL_INTLEVEL2_MASK, + XCHAL_INTLEVEL3_MASK, + XCHAL_INTLEVEL4_MASK, + XCHAL_INTLEVEL5_MASK, + XCHAL_INTLEVEL6_MASK, + XCHAL_INTLEVEL7_MASK, + }; + struct pt_regs *old_regs = set_irq_regs(regs); + + irq_enter(); + + for (;;) { + unsigned intread = get_sr(interrupt); + unsigned intenable = get_sr(intenable); + unsigned int_at_level = intread & intenable; + unsigned level; + + for (level = LOCKLEVEL; level > 0; --level) { + if (int_at_level & int_level_mask[level]) { + int_at_level &= int_level_mask[level]; + break; + } + } - /* Handle all interrupts (no priorities). - * (Clear the interrupt before processing, in case it's - * edge-triggered or software-generated) - */ + if (level == 0) + break; - for (i=0, mask = 1; i < XCHAL_NUM_INTERRUPTS; i++, mask <<= 1) { - if (mask & (intread & intenable)) { - set_sr (mask, intclear); - do_IRQ (i,regs); - } + do_IRQ(__ffs(int_at_level), regs); } + + irq_exit(); + set_irq_regs(old_regs); } /* @@ -293,17 +313,31 @@ do_debug(struct pt_regs *regs) } +static void set_handler(int idx, void *handler) +{ + unsigned int cpu; + + for_each_possible_cpu(cpu) + per_cpu(exc_table, cpu)[idx] = (unsigned long)handler; +} + /* Set exception C handler - for temporary use when probing exceptions */ void * __init trap_set_handler(int cause, void *handler) { - unsigned long *entry = &exc_table[EXC_TABLE_DEFAULT / 4 + cause]; - void *previous = (void *)*entry; - *entry = (unsigned long)handler; + void *previous = (void *)per_cpu(exc_table, 0)[ + EXC_TABLE_DEFAULT / 4 + cause]; + set_handler(EXC_TABLE_DEFAULT / 4 + cause, handler); return previous; } +static void trap_init_excsave(void) +{ + unsigned long excsave1 = (unsigned long)this_cpu_ptr(exc_table); + __asm__ __volatile__("wsr %0, excsave1\n" : : "a" (excsave1)); +} + /* * Initialize dispatch tables. * @@ -317,8 +351,6 @@ void * __init trap_set_handler(int cause, void *handler) * See vectors.S for more details. 
*/ -#define set_handler(idx,handler) (exc_table[idx] = (unsigned long) (handler)) - void __init trap_init(void) { int i; @@ -348,10 +380,15 @@ void __init trap_init(void) } /* Initialize EXCSAVE_1 to hold the address of the exception table. */ + trap_init_excsave(); +} - i = (unsigned long)exc_table; - __asm__ __volatile__("wsr %0, excsave1\n" : : "a" (i)); +#ifdef CONFIG_SMP +void secondary_trap_init(void) +{ + trap_init_excsave(); } +#endif /* * This function dumps the current valid window frame and other base registers. @@ -361,6 +398,8 @@ void show_regs(struct pt_regs * regs) { int i, wmask; + show_regs_print_info(KERN_DEFAULT); + wmask = regs->wmask & ~1; for (i = 0; i < 16; i++) { @@ -380,73 +419,25 @@ void show_regs(struct pt_regs * regs) regs->syscall); } -static __always_inline unsigned long *stack_pointer(struct task_struct *task) -{ - unsigned long *sp; - - if (!task || task == current) - __asm__ __volatile__ ("mov %0, a1\n" : "=a"(sp)); - else - sp = (unsigned long *)task->thread.sp; - - return sp; -} - -static inline void spill_registers(void) +static int show_trace_cb(struct stackframe *frame, void *data) { - unsigned int a0, ps; - - __asm__ __volatile__ ( - "movi a14, " __stringify(PS_EXCM_BIT | 1) "\n\t" - "mov a12, a0\n\t" - "rsr a13, sar\n\t" - "xsr a14, ps\n\t" - "movi a0, _spill_registers\n\t" - "rsync\n\t" - "callx0 a0\n\t" - "mov a0, a12\n\t" - "wsr a13, sar\n\t" - "wsr a14, ps\n\t" - :: "a" (&a0), "a" (&ps) - : "a2", "a3", "a4", "a7", "a11", "a12", "a13", "a14", "a15", - "memory"); + if (kernel_text_address(frame->pc)) { + printk(" [<%08lx>] ", frame->pc); + print_symbol("%s\n", frame->pc); + } + return 0; } void show_trace(struct task_struct *task, unsigned long *sp) { - unsigned long a0, a1, pc; - unsigned long sp_start, sp_end; - - if (sp) - a1 = (unsigned long)sp; - else - a1 = (unsigned long)stack_pointer(task); - - sp_start = a1 & ~(THREAD_SIZE-1); - sp_end = sp_start + THREAD_SIZE; + if (!sp) + sp = stack_pointer(task); printk("Call Trace:"); #ifdef CONFIG_KALLSYMS printk("\n"); #endif - spill_registers(); - - while (a1 > sp_start && a1 < sp_end) { - sp = (unsigned long*)a1; - - a0 = *(sp - 4); - a1 = *(sp - 3); - - if (a1 <= (unsigned long) sp) - break; - - pc = MAKE_PC_FROM_RA(a0, a1); - - if (kernel_text_address(pc)) { - printk(" [<%08lx>] ", pc); - print_symbol("%s\n", pc); - } - } + walk_stackframe(sp, show_trace_cb, NULL); printk("\n"); } @@ -479,14 +470,6 @@ void show_stack(struct task_struct *task, unsigned long *sp) show_trace(task, stack); } -void dump_stack(void) -{ - show_stack(current, NULL); -} - -EXPORT_SYMBOL(dump_stack); - - void show_code(unsigned int *pc) { long i; @@ -524,7 +507,7 @@ void die(const char * str, struct pt_regs * regs, long err) if (!user_mode(regs)) show_stack(NULL, (unsigned long*)regs->areg[1]); - add_taint(TAINT_DIE); + add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE); spin_unlock_irq(&die_lock); if (in_interrupt()) diff --git a/arch/xtensa/kernel/vectors.S b/arch/xtensa/kernel/vectors.S index 68df35f66ce..8453e6e3989 100644 --- a/arch/xtensa/kernel/vectors.S +++ b/arch/xtensa/kernel/vectors.S @@ -10,7 +10,7 @@ * Public License. See the file "COPYING" in the main directory of * this archive for more details. * - * Copyright (C) 2005 Tensilica, Inc. + * Copyright (C) 2005 - 2008 Tensilica, Inc. 
* * Chris Zankel <chris@zankel.net> * @@ -50,6 +50,7 @@ #include <asm/processor.h> #include <asm/page.h> #include <asm/thread_info.h> +#include <asm/vectors.h> #define WINDOW_VECTORS_SIZE 0x180 @@ -77,6 +78,7 @@ ENTRY(_UserExceptionVector) s32i a0, a2, PT_DEPC # mark it as a regular exception addx4 a0, a0, a3 # find entry in table l32i a0, a0, EXC_TABLE_FAST_USER # load handler + xsr a3, excsave1 # restore a3 and dispatch table jx a0 ENDPROC(_UserExceptionVector) @@ -103,6 +105,7 @@ ENTRY(_KernelExceptionVector) s32i a0, a2, PT_DEPC # mark it as a regular exception addx4 a0, a0, a3 # find entry in table l32i a0, a0, EXC_TABLE_FAST_KERNEL # load handler address + xsr a3, excsave1 # restore a3 and dispatch table jx a0 ENDPROC(_KernelExceptionVector) @@ -167,7 +170,7 @@ ENDPROC(_KernelExceptionVector) * * a0: DEPC * a1: a1 - * a2: trashed, original value in EXC_TABLE_DOUBLE_A2 + * a2: trashed, original value in EXC_TABLE_DOUBLE_SAVE * a3: exctable * depc: a0 * excsave_1: a3 @@ -203,47 +206,46 @@ ENDPROC(_KernelExceptionVector) .section .DoubleExceptionVector.text, "ax" .begin literal_prefix .DoubleExceptionVector + .globl _DoubleExceptionVector_WindowUnderflow + .globl _DoubleExceptionVector_WindowOverflow ENTRY(_DoubleExceptionVector) - /* Deliberately destroy excsave (don't assume it's value was valid). */ - - wsr a3, excsave1 # save a3 + xsr a3, excsave1 + s32i a2, a3, EXC_TABLE_DOUBLE_SAVE /* Check for kernel double exception (usually fatal). */ - rsr a3, ps - _bbci.l a3, PS_UM_BIT, .Lksp + rsr a2, ps + _bbci.l a2, PS_UM_BIT, .Lksp /* Check if we are currently handling a window exception. */ /* Note: We don't need to indicate that we enter a critical section. */ xsr a0, depc # get DEPC, save a0 - movi a3, XCHAL_WINDOW_VECTORS_VADDR - _bltu a0, a3, .Lfixup - addi a3, a3, WINDOW_VECTORS_SIZE - _bgeu a0, a3, .Lfixup + movi a2, WINDOW_VECTORS_VADDR + _bltu a0, a2, .Lfixup + addi a2, a2, WINDOW_VECTORS_SIZE + _bgeu a0, a2, .Lfixup /* Window overflow/underflow exception. Get stack pointer. */ - mov a3, a2 - /* This explicit literal and the following references to it are made - * in order to fit DoubleExceptionVector.literals into the available - * 16-byte gap before DoubleExceptionVector.text in the absence of - * link time relaxation. See kernel/vmlinux.lds.S - */ - .literal .Lexc_table, exc_table - l32r a2, .Lexc_table - l32i a2, a2, EXC_TABLE_KSTK + l32i a2, a3, EXC_TABLE_KSTK /* Check for overflow/underflow exception, jump if overflow. */ - _bbci.l a0, 6, .Lovfl - - /* a0: depc, a1: a1, a2: kstk, a3: a2, depc: a0, excsave: a3 */ + bbci.l a0, 6, _DoubleExceptionVector_WindowOverflow - /* Restart window underflow exception. + /* + * Restart window underflow exception. + * Currently: + * depc = orig a0, + * a0 = orig DEPC, + * a2 = new sp based on KSTK from exc_table + * a3 = excsave_1 + * excsave_1 = orig a3 + * * We return to the instruction in user space that caused the window * underflow exception. Therefore, we change window base to the value * before we entered the window underflow exception and prepare the @@ -251,10 +253,11 @@ ENTRY(_DoubleExceptionVector) * by changing depc (in a0). * Note: We can trash the current window frame (a0...a3) and depc! 
*/ - +_DoubleExceptionVector_WindowUnderflow: + xsr a3, excsave1 wsr a2, depc # save stack pointer temporarily rsr a0, ps - extui a0, a0, PS_OWB_SHIFT, 4 + extui a0, a0, PS_OWB_SHIFT, PS_OWB_WIDTH wsr a0, windowbase rsync @@ -262,28 +265,57 @@ ENTRY(_DoubleExceptionVector) xsr a2, depc # save a2 and get stack pointer s32i a0, a2, PT_AREG0 - - wsr a3, excsave1 # save a3 - l32r a3, .Lexc_table - + xsr a3, excsave1 rsr a0, exccause s32i a0, a2, PT_DEPC # mark it as a regular exception addx4 a0, a0, a3 + xsr a3, excsave1 l32i a0, a0, EXC_TABLE_FAST_USER jx a0 -.Lfixup:/* Check for a fixup handler or if we were in a critical section. */ + /* + * We only allow the ITLB miss exception if we are in kernel space. + * All other exceptions are unexpected and thus unrecoverable! + */ + +#ifdef CONFIG_MMU + .extern fast_second_level_miss_double_kernel + +.Lksp: /* a0: a0, a1: a1, a2: a2, a3: trashed, depc: depc, excsave: a3 */ + + rsr a3, exccause + beqi a3, EXCCAUSE_ITLB_MISS, 1f + addi a3, a3, -EXCCAUSE_DTLB_MISS + bnez a3, .Lunrecoverable +1: movi a3, fast_second_level_miss_double_kernel + jx a3 +#else +.equ .Lksp, .Lunrecoverable +#endif + + /* Critical! We can't handle this situation. PANIC! */ + + .extern unrecoverable_exception + +.Lunrecoverable_fixup: + l32i a2, a3, EXC_TABLE_DOUBLE_SAVE + xsr a0, depc - /* a0: depc, a1: a1, a2: a2, a3: trashed, depc: a0, excsave1: a3 */ +.Lunrecoverable: + rsr a3, excsave1 + wsr a0, excsave1 + movi a0, unrecoverable_exception + callx0 a0 - l32r a3, .Lexc_table - s32i a2, a3, EXC_TABLE_DOUBLE_SAVE # temporary variable +.Lfixup:/* Check for a fixup handler or if we were in a critical section. */ + + /* a0: depc, a1: a1, a2: trash, a3: exctable, depc: a0, excsave1: a3 */ /* Enter critical section. */ l32i a2, a3, EXC_TABLE_FIXUP s32i a3, a3, EXC_TABLE_FIXUP - beq a2, a3, .Lunrecoverable_fixup # critical! + beq a2, a3, .Lunrecoverable_fixup # critical section beqz a2, .Ldflt # no handler was registered /* a0: depc, a1: a1, a2: trash, a3: exctable, depc: a0, excsave: a3 */ @@ -292,64 +324,265 @@ ENTRY(_DoubleExceptionVector) .Ldflt: /* Get stack pointer. */ - l32i a3, a3, EXC_TABLE_DOUBLE_SAVE - addi a2, a3, -PT_USER_SIZE - -.Lovfl: /* Jump to default handlers. */ + l32i a2, a3, EXC_TABLE_DOUBLE_SAVE + addi a2, a2, -PT_USER_SIZE - /* a0: depc, a1: a1, a2: kstk, a3: a2, depc: a0, excsave: a3 */ + /* a0: depc, a1: a1, a2: kstk, a3: exctable, depc: a0, excsave: a3 */ - xsr a3, depc s32i a0, a2, PT_DEPC - s32i a3, a2, PT_AREG0 + l32i a0, a3, EXC_TABLE_DOUBLE_SAVE + xsr a0, depc + s32i a0, a2, PT_AREG0 - /* a0: avail, a1: a1, a2: kstk, a3: avail, depc: a2, excsave: a3 */ + /* a0: avail, a1: a1, a2: kstk, a3: exctable, depc: a2, excsave: a3 */ - l32r a3, .Lexc_table rsr a0, exccause addx4 a0, a0, a3 + xsr a3, excsave1 l32i a0, a0, EXC_TABLE_FAST_USER jx a0 /* - * We only allow the ITLB miss exception if we are in kernel space. - * All other exceptions are unexpected and thus unrecoverable! + * Restart window OVERFLOW exception. + * Currently: + * depc = orig a0, + * a0 = orig DEPC, + * a2 = new sp based on KSTK from exc_table + * a3 = EXCSAVE_1 + * excsave_1 = orig a3 + * + * We return to the instruction in user space that caused the window + * overflow exception. Therefore, we change window base to the value + * before we entered the window overflow exception and prepare the + * registers to return as if we were coming from a regular exception + * by changing DEPC (in a0). + * + * NOTE: We CANNOT trash the current window frame (a0...a3), but we + * can clobber depc. 
+ * + * The tricky part here is that overflow8 and overflow12 handlers + * save a0, then clobber a0. To restart the handler, we have to restore + * a0 if the double exception was past the point where a0 was clobbered. + * + * To keep things simple, we take advantage of the fact all overflow + * handlers save a0 in their very first instruction. If DEPC was past + * that instruction, we can safely restore a0 from where it was saved + * on the stack. + * + * a0: depc, a1: a1, a2: kstk, a3: exc_table, depc: a0, excsave1: a3 */ +_DoubleExceptionVector_WindowOverflow: + extui a2, a0, 0, 6 # get offset into 64-byte vector handler + beqz a2, 1f # if at start of vector, don't restore -#ifdef CONFIG_MMU - .extern fast_second_level_miss_double_kernel + addi a0, a0, -128 + bbsi.l a0, 8, 1f # don't restore except for overflow 8 and 12 -.Lksp: /* a0: a0, a1: a1, a2: a2, a3: trashed, depc: depc, excsave: a3 */ + /* + * This fixup handler is for the extremely unlikely case where the + * overflow handler's reference thru a0 gets a hardware TLB refill + * that bumps out the (distinct, aliasing) TLB entry that mapped its + * prior references thru a9/a13, and where our reference now thru + * a9/a13 gets a 2nd-level miss exception (not hardware TLB refill). + */ + movi a2, window_overflow_restore_a0_fixup + s32i a2, a3, EXC_TABLE_FIXUP + l32i a2, a3, EXC_TABLE_DOUBLE_SAVE + xsr a3, excsave1 - rsr a3, exccause - beqi a3, EXCCAUSE_ITLB_MISS, 1f - addi a3, a3, -EXCCAUSE_DTLB_MISS - bnez a3, .Lunrecoverable -1: movi a3, fast_second_level_miss_double_kernel - jx a3 -#else -.equ .Lksp, .Lunrecoverable -#endif + bbsi.l a0, 7, 2f - /* Critical! We can't handle this situation. PANIC! */ + /* + * Restore a0 as saved by _WindowOverflow8(). + */ - .extern unrecoverable_exception + l32e a0, a9, -16 + wsr a0, depc # replace the saved a0 + j 3f + +2: + /* + * Restore a0 as saved by _WindowOverflow12(). + */ + + l32e a0, a13, -16 + wsr a0, depc # replace the saved a0 +3: + xsr a3, excsave1 + movi a0, 0 + s32i a0, a3, EXC_TABLE_FIXUP + s32i a2, a3, EXC_TABLE_DOUBLE_SAVE +1: + /* + * Restore WindowBase while leaving all address registers restored. + * We have to use ROTW for this, because WSR.WINDOWBASE requires + * an address register (which would prevent restore). + * + * Window Base goes from 0 ... 7 (Module 8) + * Window Start is 8 bits; Ex: (0b1010 1010):0x55 from series of call4s + */ + + rsr a0, ps + extui a0, a0, PS_OWB_SHIFT, PS_OWB_WIDTH + rsr a2, windowbase + sub a0, a2, a0 + extui a0, a0, 0, 3 -.Lunrecoverable_fixup: l32i a2, a3, EXC_TABLE_DOUBLE_SAVE - xsr a0, depc + xsr a3, excsave1 + beqi a0, 1, .L1pane + beqi a0, 3, .L3pane -.Lunrecoverable: - rsr a3, excsave1 - wsr a0, excsave1 - movi a0, unrecoverable_exception - callx0 a0 + rsr a0, depc + rotw -2 + + /* + * We are now in the user code's original window frame. + * Process the exception as a user exception as if it was + * taken by the user code. + * + * This is similar to the user exception vector, + * except that PT_DEPC isn't set to EXCCAUSE. + */ +1: + xsr a3, excsave1 + wsr a2, depc + l32i a2, a3, EXC_TABLE_KSTK + s32i a0, a2, PT_AREG0 + rsr a0, exccause + + s32i a0, a2, PT_DEPC + +_DoubleExceptionVector_handle_exception: + addx4 a0, a0, a3 + l32i a0, a0, EXC_TABLE_FAST_USER + xsr a3, excsave1 + jx a0 + +.L1pane: + rsr a0, depc + rotw -1 + j 1b + +.L3pane: + rsr a0, depc + rotw -3 + j 1b - .end literal_prefix ENDPROC(_DoubleExceptionVector) /* + * Fixup handler for TLB miss in double exception handler for window owerflow. 
+ * We get here with windowbase set to the window that was being spilled and + * a0 trashed. a0 bit 7 determines if this is a call8 (bit clear) or call12 + * (bit set) window. + * + * We do the following here: + * - go to the original window retaining a0 value; + * - set up exception stack to return back to appropriate a0 restore code + * (we'll need to rotate window back and there's no place to save this + * information, use different return address for that); + * - handle the exception; + * - go to the window that was being spilled; + * - set up window_overflow_restore_a0_fixup as a fixup routine; + * - reload a0; + * - restore the original window; + * - reset the default fixup routine; + * - return to user. By the time we get to this fixup handler all information + * about the conditions of the original double exception that happened in + * the window overflow handler is lost, so we just return to userspace to + * retry overflow from start. + * + * a0: value of depc, original value in depc + * a2: trashed, original value in EXC_TABLE_DOUBLE_SAVE + * a3: exctable, original value in excsave1 + */ + +ENTRY(window_overflow_restore_a0_fixup) + + rsr a0, ps + extui a0, a0, PS_OWB_SHIFT, PS_OWB_WIDTH + rsr a2, windowbase + sub a0, a2, a0 + extui a0, a0, 0, 3 + l32i a2, a3, EXC_TABLE_DOUBLE_SAVE + xsr a3, excsave1 + + _beqi a0, 1, .Lhandle_1 + _beqi a0, 3, .Lhandle_3 + + .macro overflow_fixup_handle_exception_pane n + + rsr a0, depc + rotw -\n + + xsr a3, excsave1 + wsr a2, depc + l32i a2, a3, EXC_TABLE_KSTK + s32i a0, a2, PT_AREG0 + + movi a0, .Lrestore_\n + s32i a0, a2, PT_DEPC + rsr a0, exccause + j _DoubleExceptionVector_handle_exception + + .endm + + overflow_fixup_handle_exception_pane 2 +.Lhandle_1: + overflow_fixup_handle_exception_pane 1 +.Lhandle_3: + overflow_fixup_handle_exception_pane 3 + + .macro overflow_fixup_restore_a0_pane n + + rotw \n + /* Need to preserve a0 value here to be able to handle exception + * that may occur on a0 reload from stack. It may occur because + * TLB miss handler may not be atomic and pointer to page table + * may be lost before we get here. There are no free registers, + * so we need to use EXC_TABLE_DOUBLE_SAVE area. + */ + xsr a3, excsave1 + s32i a2, a3, EXC_TABLE_DOUBLE_SAVE + movi a2, window_overflow_restore_a0_fixup + s32i a2, a3, EXC_TABLE_FIXUP + l32i a2, a3, EXC_TABLE_DOUBLE_SAVE + xsr a3, excsave1 + bbsi.l a0, 7, 1f + l32e a0, a9, -16 + j 2f +1: + l32e a0, a13, -16 +2: + rotw -\n + + .endm + +.Lrestore_2: + overflow_fixup_restore_a0_pane 2 + +.Lset_default_fixup: + xsr a3, excsave1 + s32i a2, a3, EXC_TABLE_DOUBLE_SAVE + movi a2, 0 + s32i a2, a3, EXC_TABLE_FIXUP + l32i a2, a3, EXC_TABLE_DOUBLE_SAVE + xsr a3, excsave1 + rfe + +.Lrestore_1: + overflow_fixup_restore_a0_pane 1 + j .Lset_default_fixup +.Lrestore_3: + overflow_fixup_restore_a0_pane 3 + j .Lset_default_fixup + +ENDPROC(window_overflow_restore_a0_fixup) + + .end literal_prefix +/* * Debug interrupt vector * * There is not much space here, so simply jump to another handler. @@ -366,6 +599,44 @@ ENTRY(_DebugInterruptVector) ENDPROC(_DebugInterruptVector) + +/* + * Medium priority level interrupt vectors + * + * Each takes less than 16 (0x10) bytes, no literals, by placing + * the extra 8 bytes that would otherwise be required in the window + * vectors area where there is space. With relocatable vectors, + * all vectors are within ~ 4 kB range of each other, so we can + * simply jump (J) to another vector without having to use JX. 
+ * + * common_exception code gets current IRQ level in PS.INTLEVEL + * and preserves it for the IRQ handling time. + */ + + .macro irq_entry_level level + + .if XCHAL_EXCM_LEVEL >= \level + .section .Level\level\()InterruptVector.text, "ax" +ENTRY(_Level\level\()InterruptVector) + wsr a0, excsave2 + rsr a0, epc\level + wsr a0, epc1 + movi a0, EXCCAUSE_LEVEL1_INTERRUPT + wsr a0, exccause + rsr a0, eps\level + # branch to user or kernel vector + j _SimulateUserKernelVectorException + .endif + + .endm + + irq_entry_level 2 + irq_entry_level 3 + irq_entry_level 4 + irq_entry_level 5 + irq_entry_level 6 + + /* Window overflow and underflow handlers. * The handlers must be 64 bytes apart, first starting with the underflow * handlers underflow-4 to underflow-12, then the overflow handlers @@ -396,6 +667,24 @@ ENTRY_ALIGN64(_WindowOverflow4) ENDPROC(_WindowOverflow4) +#if XCHAL_EXCM_LEVEL >= 2 + /* Not a window vector - but a convenient location + * (where we know there's space) for continuation of + * medium priority interrupt dispatch code. + * On entry here, a0 contains PS, and EPC2 contains saved a0: + */ + .align 4 +_SimulateUserKernelVectorException: + addi a0, a0, (1 << PS_EXCM_BIT) + wsr a0, ps + bbsi.l a0, PS_UM_BIT, 1f # branch if user mode + rsr a0, excsave2 # restore a0 + j _KernelExceptionVector # simulate kernel vector exception +1: rsr a0, excsave2 # restore a0 + j _UserExceptionVector # simulate user vector exception +#endif + + /* 4-Register Window Underflow Vector (Handler) */ ENTRY_ALIGN64(_WindowUnderflow4) diff --git a/arch/xtensa/kernel/vmlinux.lds.S b/arch/xtensa/kernel/vmlinux.lds.S index 255154f820b..d16db6df86f 100644 --- a/arch/xtensa/kernel/vmlinux.lds.S +++ b/arch/xtensa/kernel/vmlinux.lds.S @@ -7,7 +7,7 @@ * License. See the file "COPYING" in the main directory of this archive * for more details. * - * Copyright (C) 2001 - 2005 Tensilica Inc. + * Copyright (C) 2001 - 2008 Tensilica Inc. 
* * Chris Zankel <chris@zankel.net> * Marc Gauthier <marc@tensilica.com, marc@alumni.uwaterloo.ca> @@ -18,6 +18,7 @@ #include <asm/page.h> #include <asm/thread_info.h> +#include <asm/vectors.h> #include <variant/core.h> #include <platform/hardware.h> OUTPUT_ARCH(xtensa) @@ -30,7 +31,7 @@ jiffies = jiffies_64; #endif #ifndef KERNELOFFSET -#define KERNELOFFSET 0xd0001000 +#define KERNELOFFSET 0xd0003000 #endif /* Note: In the following macros, it would be nice to specify only the @@ -134,6 +135,26 @@ SECTIONS RELOCATE_ENTRY(_WindowVectors_text, .WindowVectors.text); +#if XCHAL_EXCM_LEVEL >= 2 + RELOCATE_ENTRY(_Level2InterruptVector_text, + .Level2InterruptVector.text); +#endif +#if XCHAL_EXCM_LEVEL >= 3 + RELOCATE_ENTRY(_Level3InterruptVector_text, + .Level3InterruptVector.text); +#endif +#if XCHAL_EXCM_LEVEL >= 4 + RELOCATE_ENTRY(_Level4InterruptVector_text, + .Level4InterruptVector.text); +#endif +#if XCHAL_EXCM_LEVEL >= 5 + RELOCATE_ENTRY(_Level5InterruptVector_text, + .Level5InterruptVector.text); +#endif +#if XCHAL_EXCM_LEVEL >= 6 + RELOCATE_ENTRY(_Level6InterruptVector_text, + .Level6InterruptVector.text); +#endif RELOCATE_ENTRY(_KernelExceptionVector_text, .KernelExceptionVector.text); RELOCATE_ENTRY(_UserExceptionVector_text, @@ -144,6 +165,13 @@ SECTIONS .DoubleExceptionVector.text); RELOCATE_ENTRY(_DebugInterruptVector_text, .DebugInterruptVector.text); +#if defined(CONFIG_SMP) + RELOCATE_ENTRY(_SecondaryResetVector_literal, + .SecondaryResetVector.literal); + RELOCATE_ENTRY(_SecondaryResetVector_text, + .SecondaryResetVector.text); +#endif + __boot_reloc_table_end = ABSOLUTE(.) ; @@ -165,50 +193,111 @@ SECTIONS SECTION_VECTOR (_WindowVectors_text, .WindowVectors.text, - XCHAL_WINDOW_VECTORS_VADDR, 4, + WINDOW_VECTORS_VADDR, 4, .dummy) SECTION_VECTOR (_DebugInterruptVector_literal, .DebugInterruptVector.literal, - XCHAL_DEBUG_VECTOR_VADDR - 4, + DEBUG_VECTOR_VADDR - 4, SIZEOF(.WindowVectors.text), .WindowVectors.text) SECTION_VECTOR (_DebugInterruptVector_text, .DebugInterruptVector.text, - XCHAL_DEBUG_VECTOR_VADDR, + DEBUG_VECTOR_VADDR, 4, .DebugInterruptVector.literal) +#undef LAST +#define LAST .DebugInterruptVector.text +#if XCHAL_EXCM_LEVEL >= 2 + SECTION_VECTOR (_Level2InterruptVector_text, + .Level2InterruptVector.text, + INTLEVEL2_VECTOR_VADDR, + SIZEOF(LAST), LAST) +# undef LAST +# define LAST .Level2InterruptVector.text +#endif +#if XCHAL_EXCM_LEVEL >= 3 + SECTION_VECTOR (_Level3InterruptVector_text, + .Level3InterruptVector.text, + INTLEVEL3_VECTOR_VADDR, + SIZEOF(LAST), LAST) +# undef LAST +# define LAST .Level3InterruptVector.text +#endif +#if XCHAL_EXCM_LEVEL >= 4 + SECTION_VECTOR (_Level4InterruptVector_text, + .Level4InterruptVector.text, + INTLEVEL4_VECTOR_VADDR, + SIZEOF(LAST), LAST) +# undef LAST +# define LAST .Level4InterruptVector.text +#endif +#if XCHAL_EXCM_LEVEL >= 5 + SECTION_VECTOR (_Level5InterruptVector_text, + .Level5InterruptVector.text, + INTLEVEL5_VECTOR_VADDR, + SIZEOF(LAST), LAST) +# undef LAST +# define LAST .Level5InterruptVector.text +#endif +#if XCHAL_EXCM_LEVEL >= 6 + SECTION_VECTOR (_Level6InterruptVector_text, + .Level6InterruptVector.text, + INTLEVEL6_VECTOR_VADDR, + SIZEOF(LAST), LAST) +# undef LAST +# define LAST .Level6InterruptVector.text +#endif SECTION_VECTOR (_KernelExceptionVector_literal, .KernelExceptionVector.literal, - XCHAL_KERNEL_VECTOR_VADDR - 4, - SIZEOF(.DebugInterruptVector.text), - .DebugInterruptVector.text) + KERNEL_VECTOR_VADDR - 4, + SIZEOF(LAST), LAST) +#undef LAST SECTION_VECTOR (_KernelExceptionVector_text, 
.KernelExceptionVector.text, - XCHAL_KERNEL_VECTOR_VADDR, + KERNEL_VECTOR_VADDR, 4, .KernelExceptionVector.literal) SECTION_VECTOR (_UserExceptionVector_literal, .UserExceptionVector.literal, - XCHAL_USER_VECTOR_VADDR - 4, + USER_VECTOR_VADDR - 4, SIZEOF(.KernelExceptionVector.text), .KernelExceptionVector.text) SECTION_VECTOR (_UserExceptionVector_text, .UserExceptionVector.text, - XCHAL_USER_VECTOR_VADDR, + USER_VECTOR_VADDR, 4, .UserExceptionVector.literal) SECTION_VECTOR (_DoubleExceptionVector_literal, .DoubleExceptionVector.literal, - XCHAL_DOUBLEEXC_VECTOR_VADDR - 16, + DOUBLEEXC_VECTOR_VADDR - 40, SIZEOF(.UserExceptionVector.text), .UserExceptionVector.text) SECTION_VECTOR (_DoubleExceptionVector_text, .DoubleExceptionVector.text, - XCHAL_DOUBLEEXC_VECTOR_VADDR, - 32, + DOUBLEEXC_VECTOR_VADDR, + 40, .DoubleExceptionVector.literal) . = (LOADADDR( .DoubleExceptionVector.text ) + SIZEOF( .DoubleExceptionVector.text ) + 3) & ~ 3; + +#if defined(CONFIG_SMP) + + SECTION_VECTOR (_SecondaryResetVector_literal, + .SecondaryResetVector.literal, + RESET_VECTOR1_VADDR - 4, + SIZEOF(.DoubleExceptionVector.text), + .DoubleExceptionVector.text) + + SECTION_VECTOR (_SecondaryResetVector_text, + .SecondaryResetVector.text, + RESET_VECTOR1_VADDR, + 4, + .SecondaryResetVector.literal) + + . = LOADADDR(.SecondaryResetVector.text)+SIZEOF(.SecondaryResetVector.text); + +#endif + . = ALIGN(PAGE_SIZE); __init_end = .; @@ -222,11 +311,26 @@ SECTIONS . = ALIGN(0x10); .bootstrap : { *(.bootstrap.literal .bootstrap.text .bootstrap.data) } - .ResetVector.text XCHAL_RESET_VECTOR_VADDR : + .ResetVector.text RESET_VECTOR_VADDR : { *(.ResetVector.text) } + + /* + * This is a remapped copy of the Secondary Reset Vector Code. + * It keeps gdb in sync with the PC after switching + * to the temporary mapping used while setting up + * the V2 MMU mappings for Linux. + * + * Only debug information about this section is put in the kernel image. + */ + .SecondaryResetVector.remapped_text 0x46000000 (INFO): + { + *(.SecondaryResetVector.remapped_text) + } + + .xt.lit : { *(.xt.lit) } .xt.prop : { *(.xt.prop) } diff --git a/arch/xtensa/kernel/xtensa_ksyms.c b/arch/xtensa/kernel/xtensa_ksyms.c index afe058b24e6..4d2872fd9bb 100644 --- a/arch/xtensa/kernel/xtensa_ksyms.c +++ b/arch/xtensa/kernel/xtensa_ksyms.c @@ -20,11 +20,13 @@ #include <linux/in6.h> #include <asm/uaccess.h> +#include <asm/cacheflush.h> #include <asm/checksum.h> #include <asm/dma.h> #include <asm/io.h> #include <asm/page.h> #include <asm/pgalloc.h> +#include <asm/ftrace.h> #ifdef CONFIG_BLK_DEV_FD #include <asm/floppy.h> #endif @@ -104,6 +106,7 @@ EXPORT_SYMBOL(csum_partial_copy_generic); * Architecture-specific symbols */ EXPORT_SYMBOL(__xtensa_copy_user); +EXPORT_SYMBOL(__invalidate_icache_range); /* * Kernel hacking ... @@ -119,3 +122,15 @@ EXPORT_SYMBOL(outsl); EXPORT_SYMBOL(insb); EXPORT_SYMBOL(insw); EXPORT_SYMBOL(insl); + +extern long common_exception_return; +EXPORT_SYMBOL(common_exception_return); + +#ifdef CONFIG_FUNCTION_TRACER +EXPORT_SYMBOL(_mcount); +#endif + +EXPORT_SYMBOL(__invalidate_dcache_range); +#if XCHAL_DCACHE_IS_WRITEBACK +EXPORT_SYMBOL(__flush_dcache_range); +#endif |
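
The dispatch idiom used throughout the rewritten double exception vector is
"addx4 a0, a0, a3; l32i a0, a0, EXC_TABLE_FAST_USER; jx a0": EXCCAUSE is
scaled to a word index into the exception table and the fast-user handler is
jumped to. Below is a minimal host-C sketch of that lookup. The table name,
the demo handler and the cause number are illustrative assumptions only; the
real table offsets come from asm-offsets.c, not from a C array like this.

#include <stdio.h>

#define NUM_EXCCAUSES		64
#define EXCCAUSE_UNALIGNED	9	/* illustrative cause number */

/* One EXC_TABLE_FAST_USER-style slot per exception cause. */
static void (*exc_table_fast_user[NUM_EXCCAUSES])(void);

static void fast_unaligned_demo(void)
{
	puts("fast handler called");
}

/* Mirrors the addx4/l32i/jx sequence: scale the cause, load, jump. */
static void dispatch(unsigned int exccause)
{
	void (*handler)(void) = exc_table_fast_user[exccause];

	if (handler)
		handler();
}

int main(void)
{
	exc_table_fast_user[EXCCAUSE_UNALIGNED] = fast_unaligned_demo;
	dispatch(EXCCAUSE_UNALIGNED);
	return 0;
}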
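
The restart paths in _DoubleExceptionVector_WindowOverflow (and again in
window_overflow_restore_a0_fixup) recover how many panes WINDOWBASE advanced
past the owner of the faulting frame: extract PS.OWB, subtract it from
WINDOWBASE modulo 8, then rotate back with ROTW -1, -2 or -3 (.L1pane, the
default call8 path, .L3pane). A small sketch of that arithmetic, assuming
the conventional PS.OWB position (4 bits at bit 8) and an 8-pane register
file; neither value is spelled out in this patch.

#include <assert.h>
#include <stdio.h>

/* Assumed PS layout: OWB is a 4-bit field starting at bit 8. */
#define PS_OWB_SHIFT	8
#define PS_OWB_WIDTH	4

/*
 * Mirrors: rsr a0, ps; extui a0, a0, PS_OWB_SHIFT, PS_OWB_WIDTH;
 *          rsr a2, windowbase; sub a0, a2, a0; extui a0, a0, 0, 3
 */
static unsigned int pane_delta(unsigned int ps, unsigned int windowbase)
{
	unsigned int owb = (ps >> PS_OWB_SHIFT) & ((1u << PS_OWB_WIDTH) - 1);

	return (windowbase - owb) & 7;	/* window base counts modulo 8 */
}

int main(void)
{
	/* OWB = 5 and WINDOWBASE = 7: a call8 frame was being spilled,
	 * so the restart code takes the default "rotw -2" path. */
	unsigned int d = pane_delta(5u << PS_OWB_SHIFT, 7);

	assert(d == 2);
	printf("rotw -%u\n", d);	/* 1 -> .L1pane, 3 -> .L3pane */
	return 0;
}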
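
_DoubleExceptionVector_WindowOverflow decides from DEPC alone whether the
interrupted overflow handler had already saved a0 and, if so, whether it was
_WindowOverflow8 (reload through a9) or _WindowOverflow12 (reload through
a13). A host-C restatement of just those bit tests, treating depc as the
byte offset into the window-vectors region; the 64-byte handler spacing is
from the source, while the enum and function names are made up for the
sketch.

#include <assert.h>

enum a0_restore {
	NO_RESTORE,	/* a0 not saved yet, or not overflow 8/12 */
	FROM_A9_FRAME,	/* _WindowOverflow8:  l32e a0, a9, -16  */
	FROM_A13_FRAME,	/* _WindowOverflow12: l32e a0, a13, -16 */
};

static enum a0_restore classify(unsigned int depc)
{
	/* extui a2, a0, 0, 6; beqz: at the first instruction of a
	 * 64-byte handler, a0 has not been saved (or clobbered) yet. */
	if ((depc & 0x3f) == 0)
		return NO_RESTORE;

	depc -= 128;			/* addi a0, a0, -128 */

	/* bbsi.l a0, 8: only overflow 8 and 12 need the restore
	 * (for smaller offsets the unsigned wrap sets bit 8 too). */
	if (depc & 0x100)
		return NO_RESTORE;

	/* bbsi.l a0, 7: call8 window (clear) vs call12 window (set). */
	return (depc & 0x80) ? FROM_A13_FRAME : FROM_A9_FRAME;
}

int main(void)
{
	assert(classify(0x80) == NO_RESTORE);	   /* _WindowOverflow8 entry */
	assert(classify(0x84) == FROM_A9_FRAME);   /* past the a0 save       */
	assert(classify(0x104) == FROM_A13_FRAME); /* in _WindowOverflow12   */
	return 0;
}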
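
The new medium-priority vectors (irq_entry_level) all funnel into
_SimulateUserKernelVectorException, which raises PS.EXCM and then replays
the interrupt through the ordinary level-1 user or kernel exception vector
depending on PS.UM; by that point the vector has already copied EPCn to EPC1
and faked EXCCAUSE_LEVEL1_INTERRUPT. A compact sketch of that final
decision, with assumed conventional bit positions (EXCM at bit 4, UM at
bit 5) that are not spelled out in this patch.

#include <stdio.h>

/* Assumed PS bit positions (conventional Xtensa layout). */
#define PS_EXCM_BIT	4
#define PS_UM_BIT	5

static void kernel_exception_vector(void) { puts("_KernelExceptionVector"); }
static void user_exception_vector(void)   { puts("_UserExceptionVector"); }

/* Mirrors the tail of _SimulateUserKernelVectorException. */
static unsigned int simulate_vector(unsigned int ps)
{
	ps += 1u << PS_EXCM_BIT;	/* addi a0, a0, (1 << PS_EXCM_BIT) */

	if (ps & (1u << PS_UM_BIT))	/* bbsi.l a0, PS_UM_BIT, 1f */
		user_exception_vector();
	else
		kernel_exception_vector();

	return ps;
}

int main(void)
{
	simulate_vector(1u << PS_UM_BIT);	/* interrupted user code   */
	simulate_vector(0);			/* interrupted kernel code */
	return 0;
}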
