Diffstat (limited to 'arch/arm/kernel/entry-armv.S')
-rw-r--r--	arch/arm/kernel/entry-armv.S | 1194
1 file changed, 692 insertions(+), 502 deletions(-)
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index 39a6c1b0b9a..52a949a8077 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -3,6 +3,7 @@
 *
 *  Copyright (C) 1996,1997,1998 Russell King.
 *  ARM700 fix by Matthew Godbolt (linux-user@willothewisp.demon.co.uk)
+ *  nommu support by Hyok S. Choi (hyok.choi@samsung.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
@@ -10,66 +11,106 @@
 *
 *  Low-level vector interface routines
 *
- *  Note: there is a StrongARM bug in the STMIA rn, {regs}^ instruction that causes
- *  it to save wrong values... Be aware!
+ *  Note: there is a StrongARM bug in the STMIA rn, {regs}^ instruction
+ *  that causes it to save wrong values... Be aware!
 */
-#include <linux/config.h>
-#include <asm/glue.h>
+#include <asm/assembler.h>
+#include <asm/memory.h>
+#include <asm/glue-df.h>
+#include <asm/glue-pf.h>
 #include <asm/vfpmacros.h>
-#include <asm/hardware.h>		/* should be moved into entry-macro.S */
-#include <asm/arch/irqs.h>		/* should be moved into entry-macro.S */
-#include <asm/arch/entry-macro.S>
+#ifndef CONFIG_MULTI_IRQ_HANDLER
+#include <mach/entry-macro.S>
+#endif
+#include <asm/thread_notify.h>
+#include <asm/unwind.h>
+#include <asm/unistd.h>
+#include <asm/tls.h>
+#include <asm/system_info.h>

 #include "entry-header.S"
+#include <asm/entry-macro-multi.S>

 /*
- * Interrupt handling.  Preserves r7, r8, r9
+ * Interrupt handling.
 */
	.macro	irq_handler
-1:	get_irqnr_and_base r0, r6, r5, lr
-	movne	r1, sp
-	@
-	@ routine called with r0 = irq number, r1 = struct pt_regs *
-	@
-	adrne	lr, 1b
-	bne	asm_do_IRQ
+#ifdef CONFIG_MULTI_IRQ_HANDLER
+	ldr	r1, =handle_arch_irq
+	mov	r0, sp
+	adr	lr, BSYM(9997f)
+	ldr	pc, [r1]
+#else
+	arch_irq_handler_default
+#endif
+9997:
	.endm

-#ifdef CONFIG_SMP
-	/*
-	 * XXX
-	 *
-	 * this macro assumes that irqstat (r6) and base (r5) are
-	 * preserved from get_irqnr_and_base above
-	 */
-	test_for_ipi r0, r6, r5, lr
-	movne	r0, sp
-	adrne	lr, 1b
-	bne	do_IPI
+	.macro	pabt_helper
+	@ PABORT handler takes pt_regs in r2, fault address in r4 and psr in r5
+#ifdef MULTI_PABORT
+	ldr	ip, .LCprocfns
+	mov	lr, pc
+	ldr	pc, [ip, #PROCESSOR_PABT_FUNC]
+#else
+	bl	CPU_PABORT_HANDLER
 #endif
+	.endm
+
+	.macro	dabt_helper
+	@
+	@ Call the processor-specific abort handler:
+	@
+	@  r2 - pt_regs
+	@  r4 - aborted context pc
+	@  r5 - aborted context psr
+	@
+	@ The abort handler must return the aborted address in r0, and
+	@ the fault status register in r1.  r9 must be preserved.
+	@
+#ifdef MULTI_DABORT
+	ldr	ip, .LCprocfns
+	mov	lr, pc
+	ldr	pc, [ip, #PROCESSOR_DABT_FUNC]
+#else
+	bl	CPU_DABORT_HANDLER
+#endif
	.endm

+#ifdef CONFIG_KPROBES
+	.section	.kprobes.text,"ax",%progbits
+#else
+	.text
+#endif
+
 /*
 * Invalid mode handlers
 */
	.macro	inv_entry, reason
	sub	sp, sp, #S_FRAME_SIZE
-	stmib	sp, {r1 - lr}
+ ARM(	stmib	sp, {r1 - lr}		)
+ THUMB(	stmia	sp, {r0 - r12}		)
+ THUMB(	str	sp, [sp, #S_SP]		)
+ THUMB(	str	lr, [sp, #S_LR]		)
	mov	r1, #\reason
	.endm

__pabt_invalid:
	inv_entry BAD_PREFETCH
	b	common_invalid
+ENDPROC(__pabt_invalid)

__dabt_invalid:
	inv_entry BAD_DATA
	b	common_invalid
+ENDPROC(__dabt_invalid)

__irq_invalid:
	inv_entry BAD_IRQ
	b	common_invalid
+ENDPROC(__irq_invalid)

__und_invalid:
	inv_entry BAD_UNDEFINSTR
@@ -92,134 +133,124 @@ common_invalid:
	@ cpsr_<exception>, "old_r0"

	mov	r0, sp
-	and	r2, r6, #0x1f
	b	bad_mode
+ENDPROC(__und_invalid)

 /*
 * SVC mode handlers
 */

-	.macro	svc_entry
-	sub	sp, sp, #S_FRAME_SIZE
-	stmib	sp, {r1 - r12}
-
-	ldmia	r0, {r1 - r3}
-	add	r5, sp, #S_SP		@ here for interlock avoidance
-	mov	r4, #-1			@  ""  ""      ""       ""
-	add	r0, sp, #S_FRAME_SIZE	@  ""  ""      ""       ""
-	str	r1, [sp]		@ save the "real" r0 copied
+#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5)
+#define SPFIX(code...) code
+#else
+#define SPFIX(code...)
+#endif
+
+	.macro	svc_entry, stack_hole=0
+ UNWIND(.fnstart		)
+ UNWIND(.save {r0 - pc}		)
+	sub	sp, sp, #(S_FRAME_SIZE + \stack_hole - 4)
+#ifdef CONFIG_THUMB2_KERNEL
+ SPFIX(	str	r0, [sp]	)	@ temporarily saved
+ SPFIX(	mov	r0, sp		)
+ SPFIX(	tst	r0, #4		)	@ test original stack alignment
+ SPFIX(	ldr	r0, [sp]	)	@ restored
+#else
+ SPFIX(	tst	sp, #4		)
+#endif
+ SPFIX(	subeq	sp, sp, #4	)
+	stmia	sp, {r1 - r12}
+
+	ldmia	r0, {r3 - r5}
+	add	r7, sp, #S_SP - 4	@ here for interlock avoidance
+	mov	r6, #-1			@  ""  ""      ""       ""
+	add	r2, sp, #(S_FRAME_SIZE + \stack_hole - 4)
+ SPFIX(	addeq	r2, r2, #4	)
+	str	r3, [sp, #-4]!		@ save the "real" r0 copied
					@ from the exception stack

-	mov	r1, lr
+	mov	r3, lr

	@
	@ We are now ready to fill in the remaining blanks on the stack:
	@
-	@  r0 - sp_svc
-	@  r1 - lr_svc
-	@  r2 - lr_<exception>, already fixed up for correct return/restart
-	@  r3 - spsr_<exception>
-	@  r4 - orig_r0 (see pt_regs definition in ptrace.h)
+	@  r2 - sp_svc
+	@  r3 - lr_svc
+	@  r4 - lr_<exception>, already fixed up for correct return/restart
+	@  r5 - spsr_<exception>
+	@  r6 - orig_r0 (see pt_regs definition in ptrace.h)
	@
-	stmia	r5, {r0 - r4}
+	stmia	r7, {r2 - r6}
+
+#ifdef CONFIG_TRACE_IRQFLAGS
+	bl	trace_hardirqs_off
+#endif
	.endm

	.align	5
__dabt_svc:
	svc_entry
-
-	@
-	@ get ready to re-enable interrupts if appropriate
-	@
-	mrs	r9, cpsr
-	tst	r3, #PSR_I_BIT
-	biceq	r9, r9, #PSR_I_BIT
-
-	@
-	@ Call the processor-specific abort handler:
-	@
-	@  r2 - aborted context pc
-	@  r3 - aborted context cpsr
-	@
-	@ The abort handler must return the aborted address in r0, and
-	@ the fault status register in r1.  r9 must be preserved.
-	@
-#ifdef MULTI_ABORT
-	ldr	r4, .LCprocfns
-	mov	lr, pc
-	ldr	pc, [r4]
-#else
-	bl	CPU_ABORT_HANDLER
-#endif
-
-	@
-	@ set desired IRQ state, then call main handler
-	@
-	msr	cpsr_c, r9
	mov	r2, sp
-	bl	do_DataAbort
-
-	@
-	@ IRQs off again before pulling preserved data off the stack
-	@
-	disable_irq
-
-	@
-	@ restore SPSR and restart the instruction
-	@
-	ldr	r0, [sp, #S_PSR]
-	msr	spsr_cxsf, r0
-	ldmia	sp, {r0 - pc}^			@ load r0 - pc, cpsr
+	dabt_helper
+ THUMB(	ldr	r5, [sp, #S_PSR]	)	@ potentially updated CPSR
+	svc_exit r5				@ return from exception
+ UNWIND(.fnend		)
+ENDPROC(__dabt_svc)

	.align	5
__irq_svc:
	svc_entry
+	irq_handler

 #ifdef CONFIG_PREEMPT
	get_thread_info tsk
	ldr	r8, [tsk, #TI_PREEMPT]		@ get preempt count
-	add	r7, r8, #1			@ increment it
-	str	r7, [tsk, #TI_PREEMPT]
-#endif
-
-	irq_handler
-#ifdef CONFIG_PREEMPT
	ldr	r0, [tsk, #TI_FLAGS]		@ get flags
+	teq	r8, #0				@ if preempt count != 0
+	movne	r0, #0				@ force flags to 0
	tst	r0, #_TIF_NEED_RESCHED
	blne	svc_preempt
-preempt_return:
-	ldr	r0, [tsk, #TI_PREEMPT]		@ read preempt value
-	str	r8, [tsk, #TI_PREEMPT]		@ restore preempt count
-	teq	r0, r7
-	strne	r0, [r0, -r0]			@ bug()
 #endif
-	ldr	r0, [sp, #S_PSR]		@ irqs are already disabled
-	msr	spsr_cxsf, r0
-	ldmia	sp, {r0 - pc}^			@ load r0 - pc, cpsr
+
+	svc_exit r5, irq = 1			@ return from exception
+ UNWIND(.fnend		)
+ENDPROC(__irq_svc)

	.ltorg

 #ifdef CONFIG_PREEMPT
svc_preempt:
-	teq	r8, #0				@ was preempt count = 0
-	ldreq	r6, .LCirq_stat
-	movne	pc, lr				@ no
-	ldr	r0, [r6, #4]			@ local_irq_count
-	ldr	r1, [r6, #8]			@ local_bh_count
-	adds	r0, r0, r1
-	movne	pc, lr
-	mov	r7, #0				@ preempt_schedule_irq
-	str	r7, [tsk, #TI_PREEMPT]		@ expects preempt_count == 0
+	mov	r8, lr
1:	bl	preempt_schedule_irq		@ irq en/disable is done inside
	ldr	r0, [tsk, #TI_FLAGS]		@ get new tasks TI_FLAGS
	tst	r0, #_TIF_NEED_RESCHED
-	beq	preempt_return			@ go again
+	moveq	pc, r8				@ go again
	b	1b
 #endif

+__und_fault:
+	@ Correct the PC such that it is pointing at the instruction
+	@ which caused the fault.  If the faulting instruction was ARM
+	@ the PC will be pointing at the next instruction, and have to
+	@ subtract 4.  Otherwise, it is Thumb, and the PC will be
+	@ pointing at the second half of the Thumb instruction.  We
+	@ have to subtract 2.
+	ldr	r2, [r0, #S_PC]
+	sub	r2, r2, r1
+	str	r2, [r0, #S_PC]
+	b	do_undefinstr
+ENDPROC(__und_fault)
+
	.align	5
__und_svc:
+#ifdef CONFIG_KPROBES
+	@ If a kprobe is about to simulate a "stmdb sp..." instruction,
+	@ it obviously needs free stack space which then will belong to
+	@ the saved context.
+	svc_entry 64
+#else
	svc_entry
-
+#endif
	@
	@ call emulation code, which returns using r9 if it has emulated
	@ the instruction, or the more conventional lr if we are to treat
@@ -227,166 +258,141 @@ __und_svc:
	@
	@  r0 - instruction
	@
-	ldr	r0, [r2, #-4]
-	adr	r9, 1f
+#ifndef CONFIG_THUMB2_KERNEL
+	ldr	r0, [r4, #-4]
+#else
+	mov	r1, #2
+	ldrh	r0, [r4, #-2]			@ Thumb instruction at LR - 2
+	cmp	r0, #0xe800			@ 32-bit instruction if xx >= 0
+	blo	__und_svc_fault
+	ldrh	r9, [r4]			@ bottom 16 bits
+	add	r4, r4, #2
+	str	r4, [sp, #S_PC]
+	orr	r0, r9, r0, lsl #16
+#endif
+	adr	r9, BSYM(__und_svc_finish)
+	mov	r2, r4
	bl	call_fpe

+	mov	r1, #4				@ PC correction to apply
+__und_svc_fault:
	mov	r0, sp				@ struct pt_regs *regs
-	bl	do_undefinstr
-
-	@
-	@ IRQs off again before pulling preserved data off the stack
-	@
-1:	disable_irq
-
-	@
-	@ restore SPSR and restart the instruction
-	@
-	ldr	lr, [sp, #S_PSR]		@ Get SVC cpsr
-	msr	spsr_cxsf, lr
-	ldmia	sp, {r0 - pc}^			@ Restore SVC registers
+	bl	__und_fault

+__und_svc_finish:
+	ldr	r5, [sp, #S_PSR]		@ Get SVC cpsr
+	svc_exit r5				@ return from exception
+ UNWIND(.fnend		)
+ENDPROC(__und_svc)

	.align	5
__pabt_svc:
	svc_entry
-
-	@
-	@ re-enable interrupts if appropriate
-	@
-	mrs	r9, cpsr
-	tst	r3, #PSR_I_BIT
-	biceq	r9, r9, #PSR_I_BIT
-	msr	cpsr_c, r9
-
-	@
-	@ set args, then call main handler
-	@
-	@  r0 - address of faulting instruction
-	@  r1 - pointer to registers on stack
-	@
-	mov	r0, r2			@ address (pc)
-	mov	r1, sp			@ regs
-	bl	do_PrefetchAbort	@ call abort handler
-
-	@
-	@ IRQs off again before pulling preserved data off the stack
-	@
-	disable_irq
-
-	@
-	@ restore SPSR and restart the instruction
-	@
-	ldr	r0, [sp, #S_PSR]
-	msr	spsr_cxsf, r0
-	ldmia	sp, {r0 - pc}^			@ load r0 - pc, cpsr
+	mov	r2, sp				@ regs
+	pabt_helper
+	svc_exit r5				@ return from exception
+ UNWIND(.fnend		)
+ENDPROC(__pabt_svc)

	.align	5
.LCcralign:
	.word	cr_alignment
-#ifdef MULTI_ABORT
+#ifdef MULTI_DABORT
.LCprocfns:
	.word	processor
 #endif
.LCfp:
	.word	fp_enter
-#ifdef CONFIG_PREEMPT
-.LCirq_stat:
-	.word	irq_stat
-#endif

 /*
 * User mode handlers
+ *
+ * EABI note: sp_svc is always 64-bit aligned here, so should S_FRAME_SIZE
 */
+
+#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5) && (S_FRAME_SIZE & 7)
+#error "sizeof(struct pt_regs) must be a multiple of 8"
+#endif
+
	.macro	usr_entry
+ UNWIND(.fnstart	)
+ UNWIND(.cantunwind	)	@ don't unwind the user space
	sub	sp, sp, #S_FRAME_SIZE
-	stmib	sp, {r1 - r12}
+ ARM(	stmib	sp, {r1 - r12}	)
+ THUMB(	stmia	sp, {r0 - r12}	)

-	ldmia	r0, {r1 - r3}
+	ldmia	r0, {r3 - r5}
	add	r0, sp, #S_PC		@ here for interlock avoidance
-	mov	r4, #-1			@  ""  ""     ""        ""
+	mov	r6, #-1			@  ""  ""     ""        ""

-	str	r1, [sp]		@ save the "real" r0 copied
+	str	r3, [sp]		@ save the "real" r0 copied
					@ from the exception stack

-#if __LINUX_ARM_ARCH__ < 6 && !defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
-	@ make sure our user space atomic helper is aborted
-	cmp	r2, #VIRT_OFFSET
-	bichs	r3, r3, #PSR_Z_BIT
-#endif
-
	@
	@ We are now ready to fill in the remaining blanks on the stack:
	@
-	@  r2 - lr_<exception>, already fixed up for correct return/restart
-	@  r3 - spsr_<exception>
-	@  r4 - orig_r0 (see pt_regs definition in ptrace.h)
+	@  r4 - lr_<exception>, already fixed up for correct return/restart
+	@  r5 - spsr_<exception>
+	@  r6 - orig_r0 (see pt_regs definition in ptrace.h)
	@
	@ Also, separately save sp_usr and lr_usr
	@
-	stmia	r0, {r2 - r4}
-	stmdb	r0, {sp, lr}^
+	stmia	r0, {r4 - r6}
+ ARM(	stmdb	r0, {sp, lr}^			)
+ THUMB(	store_user_sp_lr r0, r1, S_SP - S_PC	)

	@
	@ Enable the alignment trap while in kernel mode
	@
-	alignment_trap r0
+	alignment_trap r0, .LCcralign

	@
	@ Clear FP to mark the first stack frame
	@
	zero_fp
-	.endm

-	.align	5
-__dabt_usr:
-	usr_entry
+#ifdef CONFIG_IRQSOFF_TRACER
+	bl	trace_hardirqs_off
+#endif
+	ct_user_exit save = 0
+	.endm

-	@
-	@ Call the processor-specific abort handler:
-	@
-	@  r2 - aborted context pc
-	@  r3 - aborted context cpsr
-	@
-	@ The abort handler must return the aborted address in r0, and
-	@ the fault status register in r1.
-	@
-#ifdef MULTI_ABORT
-	ldr	r4, .LCprocfns
-	mov	lr, pc
-	ldr	pc, [r4]
+	.macro	kuser_cmpxchg_check
+#if !defined(CONFIG_CPU_32v6K) && defined(CONFIG_KUSER_HELPERS) && \
+    !defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
+#ifndef CONFIG_MMU
+#warning "NPTL on non MMU needs fixing"
 #else
-	bl	CPU_ABORT_HANDLER
+	@ Make sure our user space atomic helper is restarted
+	@ if it was interrupted in a critical region.  Here we
+	@ perform a quick test inline since it should be false
+	@ 99.9999% of the time.  The rest is done out of line.
+	cmp	r4, #TASK_SIZE
+	blhs	kuser_cmpxchg64_fixup
+#endif
 #endif
+	.endm

-	@
-	@ IRQs on, then call the main handler
-	@
-	enable_irq
+	.align	5
+__dabt_usr:
+	usr_entry
+	kuser_cmpxchg_check
	mov	r2, sp
-	adr	lr, ret_from_exception
-	b	do_DataAbort
+	dabt_helper
+	b	ret_from_exception
+ UNWIND(.fnend		)
+ENDPROC(__dabt_usr)

	.align	5
__irq_usr:
	usr_entry
-
-	get_thread_info tsk
-#ifdef CONFIG_PREEMPT
-	ldr	r8, [tsk, #TI_PREEMPT]		@ get preempt count
-	add	r7, r8, #1			@ increment it
-	str	r7, [tsk, #TI_PREEMPT]
-#endif
-
+	kuser_cmpxchg_check
	irq_handler
-#ifdef CONFIG_PREEMPT
-	ldr	r0, [tsk, #TI_PREEMPT]
-	str	r8, [tsk, #TI_PREEMPT]
-	teq	r0, r7
-	strne	r0, [r0, -r0]
-#endif
-
+	get_thread_info tsk
	mov	why, #0
-	b	ret_to_user
+	b	ret_to_user_from_irq
+ UNWIND(.fnend		)
+ENDPROC(__irq_usr)

	.ltorg

@@ -394,33 +400,105 @@ __irq_usr:
__und_usr:
	usr_entry

-	tst	r3, #PSR_T_BIT			@ Thumb mode?
-	bne	fpundefinstr			@ ignore FP
-	sub	r4, r2, #4
+	mov	r2, r4
+	mov	r3, r5

+	@ r2 = regs->ARM_pc, which is either 2 or 4 bytes ahead of the
+	@      faulting instruction depending on Thumb mode.
+	@ r3 = regs->ARM_cpsr
	@
-	@ fall through to the emulation code, which returns using r9 if
-	@ it has emulated the instruction, or the more conventional lr
-	@ if we are to treat this as a real undefined instruction
-	@
-	@  r0 - instruction
+	@ The emulation code returns using r9 if it has emulated the
+	@ instruction, or the more conventional lr if we are to treat
+	@ this as a real undefined instruction
	@
+	adr	r9, BSYM(ret_from_exception)
+
+	@ IRQs must be enabled before attempting to read the instruction from
+	@ user space since that could cause a page/translation fault if the
+	@ page table was modified by another CPU.
+	enable_irq
+
+	tst	r3, #PSR_T_BIT			@ Thumb mode?
+	bne	__und_usr_thumb
+	sub	r4, r2, #4			@ ARM instr at LR - 4
1:	ldrt	r0, [r4]
-	adr	r9, ret_from_exception
-	adr	lr, fpundefinstr
-	@
-	@ fallthrough to call_fpe
-	@
+ ARM_BE8(rev	r0, r0)				@ little endian instruction

+	@ r0 = 32-bit ARM instruction which caused the exception
+	@ r2 = PC value for the following instruction (:= regs->ARM_pc)
+	@ r4 = PC value for the faulting instruction
+	@ lr = 32-bit undefined instruction function
+	adr	lr, BSYM(__und_usr_fault_32)
+	b	call_fpe
+
+__und_usr_thumb:
+	@ Thumb instruction
+	sub	r4, r2, #2			@ First half of thumb instr at LR - 2
+#if CONFIG_ARM_THUMB && __LINUX_ARM_ARCH__ >= 6 && CONFIG_CPU_V7
+/*
+ * Thumb-2 instruction handling.  Note that because pre-v6 and >= v6 platforms
+ * can never be supported in a single kernel, this code is not applicable at
+ * all when __LINUX_ARM_ARCH__ < 6.  This allows simplifying assumptions to be
+ * made about .arch directives.
+ */
+#if __LINUX_ARM_ARCH__ < 7
+/* If the target CPU may not be Thumb-2-capable, a run-time check is needed: */
+#define NEED_CPU_ARCHITECTURE
+	ldr	r5, .LCcpu_architecture
+	ldr	r5, [r5]
+	cmp	r5, #CPU_ARCH_ARMv7
+	blo	__und_usr_fault_16		@ 16bit undefined instruction
+/*
+ * The following code won't get run unless the running CPU really is v7, so
+ * coding round the lack of ldrht on older arches is pointless.  Temporarily
+ * override the assembler target arch with the minimum required instead:
+ */
+	.arch	armv6t2
+#endif
+2:	ldrht	r5, [r4]
+ARM_BE8(rev16	r5, r5)				@ little endian instruction
+	cmp	r5, #0xe800			@ 32bit instruction if xx != 0
+	blo	__und_usr_fault_16		@ 16bit undefined instruction
+3:	ldrht	r0, [r2]
+ARM_BE8(rev16	r0, r0)				@ little endian instruction
+	add	r2, r2, #2			@ r2 is PC + 2, make it PC + 4
+	str	r2, [sp, #S_PC]			@ it's a 2x16bit instr, update
+	orr	r0, r0, r5, lsl #16
+	adr	lr, BSYM(__und_usr_fault_32)
+	@ r0 = the two 16-bit Thumb instructions which caused the exception
+	@ r2 = PC value for the following Thumb instruction (:= regs->ARM_pc)
+	@ r4 = PC value for the first 16-bit Thumb instruction
+	@ lr = 32bit undefined instruction function
+
+#if __LINUX_ARM_ARCH__ < 7
+/* If the target arch was overridden, change it back: */
+#ifdef CONFIG_CPU_32v6K
+	.arch	armv6k
+#else
+	.arch	armv6
+#endif
+#endif /* __LINUX_ARM_ARCH__ < 7 */
+#else /* !(CONFIG_ARM_THUMB && __LINUX_ARM_ARCH__ >= 6 && CONFIG_CPU_V7) */
+	b	__und_usr_fault_16
+#endif
+ UNWIND(.fnend)
+ENDPROC(__und_usr)

 /*
- * The out of line fixup for the ldrt above.
+ * The out of line fixup for the ldrt instructions above.
 */
-	.section .fixup, "ax"
-2:	mov	pc, r9
-	.previous
-	.section __ex_table,"a"
-	.long	1b, 2b
-	.previous
+	.pushsection .fixup, "ax"
+	.align	2
+4:	str	r4, [sp, #S_PC]			@ retry current instruction
+	mov	pc, r9
+	.popsection
+	.pushsection __ex_table,"a"
+	.long	1b, 4b
+#if CONFIG_ARM_THUMB && __LINUX_ARM_ARCH__ >= 6 && CONFIG_CPU_V7
+	.long	2b, 4b
+	.long	3b, 4b
+#endif
+	.popsection

 /*
 * Check whether the instruction is a co-processor instruction.
@@ -432,23 +510,55 @@ __und_usr:
 * co-processor instructions.  However, we have to watch out
 * for the ARM6/ARM7 SWI bug.
 *
+ * NEON is a special case that has to be handled here. Not all
+ * NEON instructions are co-processor instructions, so we have
+ * to make a special case of checking for them. Plus, there's
+ * five groups of them, so we have a table of mask/opcode pairs
+ * to check against, and if any match then we branch off into the
+ * NEON handler code.
+ *
 * Emulators may wish to make use of the following registers:
- *  r0  = instruction opcode.
- *  r2  = PC+4
- *  r10 = this threads thread_info structure.
+ *  r0  = instruction opcode (32-bit ARM or two 16-bit Thumb)
+ *  r2  = PC value to resume execution after successful emulation
+ *  r9  = normal "successful" return address
+ *  r10 = this threads thread_info structure
+ *  lr  = unrecognised instruction return address
+ * IRQs enabled, FIQs enabled.
 */
+	@
+	@ Fall-through from Thumb-2 __und_usr
+	@
+#ifdef CONFIG_NEON
+	get_thread_info r10			@ get current thread
+	adr	r6, .LCneon_thumb_opcodes
+	b	2f
+#endif
call_fpe:
+	get_thread_info r10			@ get current thread
+#ifdef CONFIG_NEON
+	adr	r6, .LCneon_arm_opcodes
+2:	ldr	r5, [r6], #4			@ mask value
+	ldr	r7, [r6], #4			@ opcode bits matching in mask
+	cmp	r5, #0				@ end mask?
+	beq	1f
+	and	r8, r0, r5
+	cmp	r8, r7				@ NEON instruction?
+	bne	2b
+	mov	r7, #1
+	strb	r7, [r10, #TI_USED_CP + 10]	@ mark CP#10 as used
+	strb	r7, [r10, #TI_USED_CP + 11]	@ mark CP#11 as used
+	b	do_vfp				@ let VFP handler handle this
+1:
+#endif
	tst	r0, #0x08000000			@ only CDP/CPRT/LDC/STC have bit 27
-#if defined(CONFIG_CPU_ARM610) || defined(CONFIG_CPU_ARM710)
-	and	r8, r0, #0x0f000000		@ mask out op-code bits
-	teqne	r8, #0x0f000000			@ SWI (ARM6/7 bug)?
+	tstne	r0, #0x04000000			@ bit 26 set on both ARM and Thumb-2
	moveq	pc, lr
-	get_thread_info r10			@ get current thread
	and	r8, r0, #0x00000f00		@ mask out CP number
+ THUMB(	lsr	r8, r8, #8		)
	mov	r7, #1
	add	r6, r10, #TI_USED_CP
-	strb	r7, [r6, r8, lsr #8]		@ set appropriate used_cp[]
+ ARM(	strb	r7, [r6, r8, lsr #8]	)	@ set appropriate used_cp[]
+ THUMB(	strb	r7, [r6, r8]		)	@ set appropriate used_cp[]
 #ifdef CONFIG_IWMMXT
	@ Test if we need to give access to iWMMXt coprocessors
	ldr	r5, [r10, #TI_FLAGS]
@@ -456,31 +566,68 @@ call_fpe:
	movcss	r7, r5, lsr #(TIF_USING_IWMMXT + 1)
	bcs	iwmmxt_task_enable
 #endif
-	enable_irq
-	add	pc, pc, r8, lsr #6
-	mov	r0, r0
-
-	mov	pc, lr				@ CP#0
-	b	do_fpe				@ CP#1 (FPE)
-	b	do_fpe				@ CP#2 (FPE)
-	mov	pc, lr				@ CP#3
-	mov	pc, lr				@ CP#4
-	mov	pc, lr				@ CP#5
-	mov	pc, lr				@ CP#6
-	mov	pc, lr				@ CP#7
-	mov	pc, lr				@ CP#8
-	mov	pc, lr				@ CP#9
+ ARM(	add	pc, pc, r8, lsr #6	)
+ THUMB(	lsl	r8, r8, #2		)
+ THUMB(	add	pc, r8			)
+	nop
+
+	movw_pc	lr				@ CP#0
+	W(b)	do_fpe				@ CP#1 (FPE)
+	W(b)	do_fpe				@ CP#2 (FPE)
+	movw_pc	lr				@ CP#3
+#ifdef CONFIG_CRUNCH
+	b	crunch_task_enable		@ CP#4 (MaverickCrunch)
+	b	crunch_task_enable		@ CP#5 (MaverickCrunch)
+	b	crunch_task_enable		@ CP#6 (MaverickCrunch)
+#else
+	movw_pc	lr				@ CP#4
+	movw_pc	lr				@ CP#5
+	movw_pc	lr				@ CP#6
+#endif
+	movw_pc	lr				@ CP#7
+	movw_pc	lr				@ CP#8
+	movw_pc	lr				@ CP#9
 #ifdef CONFIG_VFP
-	b	do_vfp				@ CP#10 (VFP)
-	b	do_vfp				@ CP#11 (VFP)
+	W(b)	do_vfp				@ CP#10 (VFP)
+	W(b)	do_vfp				@ CP#11 (VFP)
 #else
-	mov	pc, lr				@ CP#10 (VFP)
-	mov	pc, lr				@ CP#11 (VFP)
+	movw_pc	lr				@ CP#10 (VFP)
+	movw_pc	lr				@ CP#11 (VFP)
+#endif
+	movw_pc	lr				@ CP#12
+	movw_pc	lr				@ CP#13
+	movw_pc	lr				@ CP#14 (Debug)
+	movw_pc	lr				@ CP#15 (Control)
+
+#ifdef NEED_CPU_ARCHITECTURE
+	.align	2
+.LCcpu_architecture:
+	.word	__cpu_architecture
+#endif
+
+#ifdef CONFIG_NEON
+	.align	6
+
+.LCneon_arm_opcodes:
+	.word	0xfe000000			@ mask
+	.word	0xf2000000			@ opcode
+
+	.word	0xff100000			@ mask
+	.word	0xf4000000			@ opcode
+
+	.word	0x00000000			@ mask
+	.word	0x00000000			@ opcode
+
+.LCneon_thumb_opcodes:
+	.word	0xef000000			@ mask
+	.word	0xef000000			@ opcode
+
+	.word	0xff100000			@ mask
+	.word	0xf9000000			@ opcode
+
+	.word	0x00000000			@ mask
+	.word	0x00000000			@ opcode
 #endif
-	mov	pc, lr				@ CP#12
-	mov	pc, lr				@ CP#13
-	mov	pc, lr				@ CP#14 (Debug)
-	mov	pc, lr				@ CP#15 (Control)

do_fpe:
	ldr	r4, .LCfp
@@ -496,32 +643,45 @@ do_fpe:
 *  lr  = unrecognised FP instruction return address
 */

-	.data
+	.pushsection .data
ENTRY(fp_enter)
-	.word	fpundefinstr
-	.text
+	.word	no_fp
+	.popsection

-fpundefinstr:
-	mov	r0, sp
-	adr	lr, ret_from_exception
-	b	do_undefinstr
+ENTRY(no_fp)
+	mov	pc, lr
+ENDPROC(no_fp)
+
+__und_usr_fault_32:
+	mov	r1, #4
+	b	1f
+__und_usr_fault_16:
+	mov	r1, #2
+1:	mov	r0, sp
+	adr	lr, BSYM(ret_from_exception)
+	b	__und_fault
+ENDPROC(__und_usr_fault_32)
+ENDPROC(__und_usr_fault_16)

	.align	5
__pabt_usr:
	usr_entry
-
-	enable_irq				@ Enable interrupts
-	mov	r0, r2				@ address (pc)
-	mov	r1, sp				@ regs
-	bl	do_PrefetchAbort		@ call abort handler
+	mov	r2, sp				@ regs
+	pabt_helper
+ UNWIND(.fnend		)
	/* fall through */

 /*
 * This is the return code to user mode for abort handlers
 */
ENTRY(ret_from_exception)
+ UNWIND(.fnstart	)
+ UNWIND(.cantunwind	)
	get_thread_info tsk
	mov	why, #0
	b	ret_to_user
+ UNWIND(.fnend		)
+ENDPROC(__pabt_usr)
+ENDPROC(ret_from_exception)

 /*
 * Register switch for ARMv3 and ARMv4 processors
@@ -529,124 +689,88 @@ ENTRY(ret_from_exception)
 * previous and next are guaranteed not to be the same.
 */
ENTRY(__switch_to)
+ UNWIND(.fnstart	)
+ UNWIND(.cantunwind	)
	add	ip, r1, #TI_CPU_SAVE
-	ldr	r3, [r2, #TI_TP_VALUE]
-	stmia	ip!, {r4 - sl, fp, sp, lr}	@ Store most regs on stack
-	ldr	r6, [r2, #TI_CPU_DOMAIN]!
-#if defined(CONFIG_CPU_XSCALE) && !defined(CONFIG_IWMMXT)
-	mra	r4, r5, acc0
-	stmia	ip, {r4, r5}
-#endif
-#if defined(CONFIG_HAS_TLS_REG)
-	mcr	p15, 0, r3, c13, c0, 3		@ set TLS register
-#elif !defined(CONFIG_TLS_REG_EMUL)
-	mov	r4, #0xffff0fff
-	str	r3, [r4, #-15]			@ TLS val at 0xffff0ff0
+ ARM(	stmia	ip!, {r4 - sl, fp, sp, lr} )	@ Store most regs on stack
+ THUMB(	stmia	ip!, {r4 - sl, fp}	   )	@ Store most regs on stack
+ THUMB(	str	sp, [ip], #4		   )
+ THUMB(	str	lr, [ip], #4		   )
+	ldr	r4, [r2, #TI_TP_VALUE]
+	ldr	r5, [r2, #TI_TP_VALUE + 4]
+#ifdef CONFIG_CPU_USE_DOMAINS
+	ldr	r6, [r2, #TI_CPU_DOMAIN]
+#endif
+	switch_tls r1, r4, r5, r3, r7
+#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
+	ldr	r7, [r2, #TI_TASK]
+	ldr	r8, =__stack_chk_guard
+	ldr	r7, [r7, #TSK_STACK_CANARY]
 #endif
+#ifdef CONFIG_CPU_USE_DOMAINS
	mcr	p15, 0, r6, c3, c0, 0		@ Set domain register
-#ifdef CONFIG_VFP
-	@ Always disable VFP so we can lazily save/restore the old
-	@ state. This occurs in the context of the previous thread.
-	VFPFMRX	r4, FPEXC
-	bic	r4, r4, #FPEXC_ENABLE
-	VFPFMXR	FPEXC, r4
 #endif
-#if defined(CONFIG_IWMMXT)
-	bl	iwmmxt_task_switch
-#elif defined(CONFIG_CPU_XSCALE)
-	add	r4, r2, #40			@ cpu_context_save->extra
-	ldmib	r4, {r4, r5}
-	mar	acc0, r4, r5
+	mov	r5, r0
+	add	r4, r2, #TI_CPU_SAVE
+	ldr	r0, =thread_notify_head
+	mov	r1, #THREAD_NOTIFY_SWITCH
+	bl	atomic_notifier_call_chain
+#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
+	str	r7, [r8]
 #endif
-	ldmib	r2, {r4 - sl, fp, sp, pc}	@ Load all regs saved previously
+ THUMB(	mov	ip, r4			   )
+	mov	r0, r5
+ ARM(	ldmia	r4, {r4 - sl, fp, sp, pc}  )	@ Load all regs saved previously
+ THUMB(	ldmia	ip!, {r4 - sl, fp}	   )	@ Load all regs saved previously
+ THUMB(	ldr	sp, [ip], #4		   )
+ THUMB(	ldr	pc, [ip]		   )
+ UNWIND(.fnend		)
+ENDPROC(__switch_to)

	__INIT

 /*
 * User helpers.
 *
- * These are segment of kernel provided user code reachable from user space
- * at a fixed address in kernel memory.  This is used to provide user space
- * with some operations which require kernel help because of unimplemented
- * native feature and/or instructions in many ARM CPUs. The idea is for
- * this code to be executed directly in user mode for best efficiency but
- * which is too intimate with the kernel counter part to be left to user
- * libraries.  In fact this code might even differ from one CPU to another
- * depending on the available instruction set and restrictions like on
- * SMP systems.  In other words, the kernel reserves the right to change
- * this code as needed without warning. Only the entry points and their
- * results are guaranteed to be stable.
- *
 * Each segment is 32-byte aligned and will be moved to the top of the high
 * vector page.  New segments (if ever needed) must be added in front of
 * existing ones.  This mechanism should be used only for things that are
 * really small and justified, and not be abused freely.
 *
- * User space is expected to implement those things inline when optimizing
- * for a processor that has the necessary native support, but only if such
- * resulting binaries are already to be incompatible with earlier ARM
- * processors due to the use of unsupported instructions other than what
- * is provided here.  In other words don't make binaries unable to run on
- * earlier processors just for the sake of not using these kernel helpers
- * if your compiled code is not going to use the new instructions for other
- * purpose.
+ * See Documentation/arm/kernel_user_helpers.txt for formal definitions.
 */
+ THUMB(	.arm	)

+	.macro	usr_ret, reg
+#ifdef CONFIG_ARM_THUMB
+	bx	\reg
+#else
+	mov	pc, \reg
+#endif
+	.endm
+
+	.macro	kuser_pad, sym, size
+	.if	(. - \sym) & 3
+	.rept	4 - (. - \sym) & 3
+	.byte	0
+	.endr
+	.endif
+	.rept	(\size - (. - \sym)) / 4
+	.word	0xe7fddef1
+	.endr
+	.endm
+
+#ifdef CONFIG_KUSER_HELPERS
	.align	5
	.globl	__kuser_helper_start
__kuser_helper_start:

 /*
- * Reference prototype:
- *
- *	int __kernel_cmpxchg(int oldval, int newval, int *ptr)
- *
- * Input:
- *
- *	r0 = oldval
- *	r1 = newval
- *	r2 = ptr
- *	lr = return address
- *
- * Output:
- *
- *	r0 = returned value (zero or non-zero)
- *	C flag = set if r0 == 0, clear if r0 != 0
- *
- * Clobbered:
- *
- *	r3, ip, flags
- *
- * Definition and user space usage example:
- *
- *	typedef int (__kernel_cmpxchg_t)(int oldval, int newval, int *ptr);
- *	#define __kernel_cmpxchg (*(__kernel_cmpxchg_t *)0xffff0fc0)
- *
- * Atomically store newval in *ptr if *ptr is equal to oldval for user space.
- * Return zero if *ptr was changed or non-zero if no exchange happened.
- * The C flag is also set if *ptr was changed to allow for assembly
- * optimization in the calling code.
- *
- * For example, a user space atomic_add implementation could look like this:
- *
- * #define atomic_add(ptr, val) \
- *	({ register unsigned int *__ptr asm("r2") = (ptr); \
- *	   register unsigned int __result asm("r1"); \
- *	   asm volatile ( \
- *	       "1: @ atomic_add\n\t" \
- *	       "ldr	r0, [r2]\n\t" \
- *	       "mov	r3, #0xffff0fff\n\t" \
- *	       "add	lr, pc, #4\n\t" \
- *	       "add	r1, r0, %2\n\t" \
- *	       "add	pc, r3, #(0xffff0fc0 - 0xffff0fff)\n\t" \
- *	       "bcc	1b" \
- *	       : "=&r" (__result) \
- *	       : "r" (__ptr), "rIL" (val) \
- *	       : "r0","r3","ip","lr","cc","memory" ); \
- *	   __result; })
+ * Due to the length of some sequences, __kuser_cmpxchg64 spans 2 regular
+ * kuser "slots", therefore 0xffff0f80 is not used as a valid entry point.
 */

-__kuser_cmpxchg:				@ 0xffff0fc0
+__kuser_cmpxchg64:				@ 0xffff0f60

 #if defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)

@@ -655,107 +779,167 @@ __kuser_cmpxchg:				@ 0xffff0fc0
	 * The kernel itself must perform the operation.
	 * A special ghost syscall is used for that (see traps.c).
	 */
-	swi	#0x9ffff0
-	mov	pc, lr
-
-#elif __LINUX_ARM_ARCH__ < 6
+	stmfd	sp!, {r7, lr}
+	ldr	r7, 1f			@ it's 20 bits
+	swi	__ARM_NR_cmpxchg64
+	ldmfd	sp!, {r7, pc}
+1:	.word	__ARM_NR_cmpxchg64
+
+#elif defined(CONFIG_CPU_32v6K)
+
+	stmfd	sp!, {r4, r5, r6, r7}
+	ldrd	r4, r5, [r0]			@ load old val
+	ldrd	r6, r7, [r1]			@ load new val
+	smp_dmb	arm
+1:	ldrexd	r0, r1, [r2]			@ load current val
+	eors	r3, r0, r4			@ compare with oldval (1)
+	eoreqs	r3, r1, r5			@ compare with oldval (2)
+	strexdeq r3, r6, r7, [r2]		@ store newval if eq
+	teqeq	r3, #1				@ success?
+	beq	1b				@ if no then retry
+	smp_dmb	arm
+	rsbs	r0, r3, #0			@ set returned val and C flag
+	ldmfd	sp!, {r4, r5, r6, r7}
+	usr_ret	lr
+
+#elif !defined(CONFIG_SMP)
+
+#ifdef CONFIG_MMU

	/*
-	 * Theory of operation:
-	 *
-	 * We set the Z flag before loading oldval. If ever an exception
-	 * occurs we can not be sure the loaded value will still be the same
-	 * when the exception returns, therefore the user exception handler
-	 * will clear the Z flag whenever the interrupted user code was
-	 * actually from the kernel address space (see the usr_entry macro).
-	 *
-	 * The post-increment on the str is used to prevent a race with an
-	 * exception happening just after the str instruction which would
-	 * clear the Z flag although the exchange was done.
+	 * The only thing that can break atomicity in this cmpxchg64
+	 * implementation is either an IRQ or a data abort exception
+	 * causing another process/thread to be scheduled in the middle of
+	 * the critical sequence.  The same strategy as for cmpxchg is used.
	 */
-	teq	ip, ip			@ set Z flag
-	ldr	ip, [r2]		@ load current val
-	add	r3, r2, #1		@ prepare store ptr
-	teqeq	ip, r0			@ compare with oldval if still allowed
-	streq	r1, [r3, #-1]!		@ store newval if still allowed
-	subs	r0, r2, r3		@ if r2 == r3 the str occured
+	stmfd	sp!, {r4, r5, r6, lr}
+	ldmia	r0, {r4, r5}			@ load old val
+	ldmia	r1, {r6, lr}			@ load new val
+1:	ldmia	r2, {r0, r1}			@ load current val
+	eors	r3, r0, r4			@ compare with oldval (1)
+	eoreqs	r3, r1, r5			@ compare with oldval (2)
+2:	stmeqia	r2, {r6, lr}			@ store newval if eq
+	rsbs	r0, r3, #0			@ set return val and C flag
+	ldmfd	sp!, {r4, r5, r6, pc}
+
+	.text
+kuser_cmpxchg64_fixup:
+	@ Called from kuser_cmpxchg_fixup.
+	@ r4 = address of interrupted insn (must be preserved).
+	@ sp = saved regs. r7 and r8 are clobbered.
+	@ 1b = first critical insn, 2b = last critical insn.
+	@ If r4 >= 1b and r4 <= 2b then saved pc_usr is set to 1b.
+	mov	r7, #0xffff0fff
+	sub	r7, r7, #(0xffff0fff - (0xffff0f60 + (1b - __kuser_cmpxchg64)))
+	subs	r8, r4, r7
+	rsbcss	r8, r8, #(2b - 1b)
+	strcs	r7, [sp, #S_PC]
+#if __LINUX_ARM_ARCH__ < 6
+	bcc	kuser_cmpxchg32_fixup
+#endif
	mov	pc, lr
+	.previous

 #else
+#warning "NPTL on non MMU needs fixing"
+	mov	r0, #-1
+	adds	r0, r0, #0
+	usr_ret	lr
+#endif

-	ldrex	r3, [r2]
-	subs	r3, r3, r0
-	strexeq	r3, r1, [r2]
-	rsbs	r0, r3, #0
-	mov	pc, lr
-
+#else
+#error "incoherent kernel configuration"
 #endif

-	.align	5
+	kuser_pad __kuser_cmpxchg64, 64

-/*
- * Reference prototype:
- *
- *	int __kernel_get_tls(void)
- *
- * Input:
- *
- *	lr = return address
- *
- * Output:
- *
- *	r0 = TLS value
- *
- * Clobbered:
- *
- *	the Z flag might be lost
- *
- * Definition and user space usage example:
- *
- *	typedef int (__kernel_get_tls_t)(void);
- *	#define __kernel_get_tls (*(__kernel_get_tls_t *)0xffff0fe0)
- *
- * Get the TLS value as previously set via the __ARM_NR_set_tls syscall.
- *
- * This could be used as follows:
- *
- * #define __kernel_get_tls() \
- *	({ register unsigned int __val asm("r0"); \
- *	   asm( "mov r0, #0xffff0fff; mov lr, pc; sub pc, r0, #31" \
- *		: "=r" (__val) : : "lr","cc" ); \
- *	   __val; })
- */
+__kuser_memory_barrier:				@ 0xffff0fa0
+	smp_dmb	arm
+	usr_ret	lr

-__kuser_get_tls:				@ 0xffff0fe0
+	kuser_pad __kuser_memory_barrier, 32
+
+__kuser_cmpxchg:				@ 0xffff0fc0
+
+#if defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
+
+	/*
+	 * Poor you.  No fast solution possible...
+	 * The kernel itself must perform the operation.
+	 * A special ghost syscall is used for that (see traps.c).
+	 */
+	stmfd	sp!, {r7, lr}
+	ldr	r7, 1f			@ it's 20 bits
+	swi	__ARM_NR_cmpxchg
+	ldmfd	sp!, {r7, pc}
+1:	.word	__ARM_NR_cmpxchg
+
+#elif __LINUX_ARM_ARCH__ < 6

-#if !defined(CONFIG_HAS_TLS_REG) && !defined(CONFIG_TLS_REG_EMUL)
+#ifdef CONFIG_MMU

-	ldr	r0, [pc, #(16 - 8)]	@ TLS stored at 0xffff0ff0
+	/*
+	 * The only thing that can break atomicity in this cmpxchg
+	 * implementation is either an IRQ or a data abort exception
+	 * causing another process/thread to be scheduled in the middle
+	 * of the critical sequence.  To prevent this, code is added to
+	 * the IRQ and data abort exception handlers to set the pc back
+	 * to the beginning of the critical section if it is found to be
+	 * within that critical section (see kuser_cmpxchg_fixup).
+	 */
+1:	ldr	r3, [r2]			@ load current val
+	subs	r3, r3, r0			@ compare with oldval
+2:	streq	r1, [r2]			@ store newval if eq
+	rsbs	r0, r3, #0			@ set return val and C flag
+	usr_ret	lr
+
+	.text
+kuser_cmpxchg32_fixup:
+	@ Called from kuser_cmpxchg_check macro.
+	@ r4 = address of interrupted insn (must be preserved).
+	@ sp = saved regs. r7 and r8 are clobbered.
+	@ 1b = first critical insn, 2b = last critical insn.
+	@ If r4 >= 1b and r4 <= 2b then saved pc_usr is set to 1b.
+	mov	r7, #0xffff0fff
+	sub	r7, r7, #(0xffff0fff - (0xffff0fc0 + (1b - __kuser_cmpxchg)))
+	subs	r8, r4, r7
+	rsbcss	r8, r8, #(2b - 1b)
+	strcs	r7, [sp, #S_PC]
	mov	pc, lr
+	.previous

 #else
+#warning "NPTL on non MMU needs fixing"
+	mov	r0, #-1
+	adds	r0, r0, #0
+	usr_ret	lr
+#endif

-	mrc	p15, 0, r0, c13, c0, 3	@ read TLS register
-	mov	pc, lr
+#else

+	smp_dmb	arm
+1:	ldrex	r3, [r2]
+	subs	r3, r3, r0
+	strexeq	r3, r1, [r2]
+	teqeq	r3, #1
+	beq	1b
+	rsbs	r0, r3, #0
+	/* beware -- each __kuser slot must be 8 instructions max */
+	ALT_SMP(b	__kuser_memory_barrier)
+	ALT_UP(usr_ret	lr)
 #endif

-	.rep	5
-	.word	0			@ pad up to __kuser_helper_version
-	.endr
+	kuser_pad __kuser_cmpxchg, 32

-/*
- * Reference declaration:
- *
- *	extern unsigned int __kernel_helper_version;
- *
- * Definition and user space usage example:
- *
- *	#define __kernel_helper_version (*(unsigned int *)0xffff0ffc)
- *
- * User space may read this to determine the curent number of helpers
- * available.
- */
+__kuser_get_tls:				@ 0xffff0fe0
+	ldr	r0, [pc, #(16 - 8)]	@ read TLS, set in kuser_get_tls_init
+	usr_ret	lr
+	mrc	p15, 0, r0, c13, c0, 3	@ 0xffff0fe8 hardware TLS code
+	kuser_pad __kuser_get_tls, 16
+	.rep	3
+	.word	0			@ 0xffff0ff0 software TLS value, then
+	.endr				@ pad up to __kuser_helper_version

__kuser_helper_version:				@ 0xffff0ffc
	.word	((__kuser_helper_end - __kuser_helper_start) >> 5)
@@ -763,13 +947,16 @@ __kuser_helper_version:				@ 0xffff0ffc
	.globl	__kuser_helper_end
__kuser_helper_end:

+#endif
+
+ THUMB(	.thumb	)

 /*
 * Vector stubs.
 *
- * This code is copied to 0xffff0200 so we can use branches in the
- * vectors, rather than ldr's.  Note that this code must not
- * exceed 0x300 bytes.
+ * This code is copied to 0xffff1000 so we can use branches in the
+ * vectors, rather than ldr's.  Note that this code must not exceed
+ * a page size.
 *
 * Common stub entry macro:
 *   Enter in IRQ mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
@@ -777,7 +964,7 @@ __kuser_helper_end:
 * SP points to a minimal amount of processor-private memory, the address
 * of which is copied into r0 for the mode specific abort handler.
 */
-	.macro	vector_stub, name, correction=0
+	.macro	vector_stub, name, mode, correction=0
	.align	5

vector_\name:
@@ -797,25 +984,40 @@ vector_\name:
	@ Prepare for SVC32 mode.  IRQs remain disabled.
	@
	mrs	r0, cpsr
-	bic	r0, r0, #MODE_MASK
-	orr	r0, r0, #SVC_MODE
+	eor	r0, r0, #(\mode ^ SVC_MODE | PSR_ISETSTATE)
	msr	spsr_cxsf, r0

	@
	@ the branch table must immediately follow this code
	@
-	mov	r0, sp
	and	lr, lr, #0x0f
-	ldr	lr, [pc, lr, lsl #2]
+ THUMB(	adr	r0, 1f			)
+ THUMB(	ldr	lr, [r0, lr, lsl #2]	)
+	mov	r0, sp
+ ARM(	ldr	lr, [pc, lr, lsl #2]	)
	movs	pc, lr			@ branch to handler in SVC mode
+ENDPROC(vector_\name)
+
+	.align	2
+	@ handler addresses follow this label
+1:
	.endm

-	.globl	__stubs_start
+	.section .stubs, "ax", %progbits
__stubs_start:
+	@ This must be the first word
+	.word	vector_swi
+
+vector_rst:
+ ARM(	swi	SYS_ERROR0	)
+ THUMB(	svc	#0		)
+ THUMB(	nop			)
+	b	vector_und
+
 /*
 * Interrupt dispatcher
 */
-	vector_stub	irq, 4
+	vector_stub	irq, IRQ_MODE, 4

	.long	__irq_usr			@  0  (USR_26 / USR_32)
	.long	__irq_invalid			@  1  (FIQ_26 / FIQ_32)
@@ -838,7 +1040,7 @@ __stubs_start:
 * Data abort dispatcher
 * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
 */
-	vector_stub	dabt, 8
+	vector_stub	dabt, ABT_MODE, 8

	.long	__dabt_usr			@  0  (USR_26 / USR_32)
	.long	__dabt_invalid			@  1  (FIQ_26 / FIQ_32)
@@ -861,7 +1063,7 @@ __stubs_start:
 * Prefetch abort dispatcher
 * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
 */
-	vector_stub	pabt, 4
+	vector_stub	pabt, ABT_MODE, 4

	.long	__pabt_usr			@  0 (USR_26 / USR_32)
	.long	__pabt_invalid			@  1 (FIQ_26 / FIQ_32)
@@ -884,7 +1086,7 @@ __stubs_start:
 * Undef instr entry dispatcher
 * Enter in UND mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
 */
-	vector_stub	und
+	vector_stub	und, UND_MODE

	.long	__und_usr			@  0 (USR_26 / USR_32)
	.long	__und_invalid			@  1 (FIQ_26 / FIQ_32)
@@ -906,6 +1108,16 @@ __stubs_start:
	.align	5

 /*=============================================================================
+ * Address exception handler
+ *-----------------------------------------------------------------------------
+ * These aren't too critical.
+ * (they're not supposed to happen, and won't happen in 32-bit data mode).
+ */
+
+vector_addrexcptn:
+	b	vector_addrexcptn
+
+/*=============================================================================
 * Undefined FIQs
 *-----------------------------------------------------------------------------
 * Enter in FIQ mode, spsr = ANY CPSR, lr = ANY PC
@@ -916,52 +1128,30 @@ __stubs_start:
 * get out of that mode without clobbering one register.
 */
vector_fiq:
-	disable_fiq
	subs	pc, lr, #4

-/*=============================================================================
- * Address exception handler
- *-----------------------------------------------------------------------------
- * These aren't too critical.
- * (they're not supposed to happen, and won't happen in 32-bit data mode).
- */
+	.globl	vector_fiq_offset
+	.equ	vector_fiq_offset, vector_fiq

-vector_addrexcptn:
-	b	vector_addrexcptn
-
-/*
- * We group all the following data together to optimise
- * for CPUs with separate I & D caches.
- */
-	.align	5
-
-.LCvswi:
-	.word	vector_swi
-
-	.globl	__stubs_end
-__stubs_end:
-
-	.equ	stubs_offset, __vectors_start + 0x200 - __stubs_start
-
-	.globl	__vectors_start
+	.section .vectors, "ax", %progbits
__vectors_start:
-	swi	SYS_ERROR0
-	b	vector_und + stubs_offset
-	ldr	pc, .LCvswi + stubs_offset
-	b	vector_pabt + stubs_offset
-	b	vector_dabt + stubs_offset
-	b	vector_addrexcptn + stubs_offset
-	b	vector_irq + stubs_offset
-	b	vector_fiq + stubs_offset
-
-	.globl	__vectors_end
-__vectors_end:
+	W(b)	vector_rst
+	W(b)	vector_und
+	W(ldr)	pc, __vectors_start + 0x1000
+	W(b)	vector_pabt
+	W(b)	vector_dabt
+	W(b)	vector_addrexcptn
+	W(b)	vector_irq
+	W(b)	vector_fiq

	.data

	.globl	cr_alignment
-	.globl	cr_no_alignment
cr_alignment:
	.space	4
-cr_no_alignment:
+
+#ifdef CONFIG_MULTI_IRQ_HANDLER
+	.globl	handle_arch_irq
+handle_arch_irq:
	.space	4
+#endif
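The new `__und_usr_thumb` path decides whether a trapping Thumb instruction is 16 or 32 bits wide purely from its first halfword: any halfword >= 0xe800 (top bits 0b11101, 0b11110 or 0b11111) opens a 32-bit Thumb-2 encoding, and the two halves are then merged with "orr r0, r0, r5, lsl #16". A minimal C sketch of that classification (function names here are illustrative, not kernel API):

	#include <stdbool.h>
	#include <stdint.h>

	/* First-halfword test from label 2: above -- a value >= 0xe800
	 * means the halfword opens a 32-bit Thumb-2 instruction. */
	static bool thumb_insn_is_32bit(uint16_t first_half)
	{
		return first_half >= 0xe800;
	}

	/* Merge of the two halfwords, mirroring "orr r0, r0, r5, lsl #16":
	 * the first halfword ends up in the high 16 bits. */
	static uint32_t thumb32_combine(uint16_t first_half, uint16_t second_half)
	{
		return ((uint32_t)first_half << 16) | second_half;
	}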
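call_fpe recognises NEON instructions by walking the .LCneon_arm_opcodes (or .LCneon_thumb_opcodes) table: each entry is a mask word followed by an opcode word, a zero mask terminates the list, and a hit sends the instruction to do_vfp with CP#10 and CP#11 marked used. The same walk in C, a sketch using the ARM-encoding values from the table in the diff:

	#include <stdbool.h>
	#include <stdint.h>

	/* mask/opcode pairs from .LCneon_arm_opcodes; a zero mask ends the table */
	static const uint32_t neon_arm_opcodes[][2] = {
		{ 0xfe000000, 0xf2000000 },
		{ 0xff100000, 0xf4000000 },
		{ 0x00000000, 0x00000000 },	/* end marker */
	};

	static bool insn_is_neon(uint32_t insn)
	{
		for (unsigned i = 0; neon_arm_opcodes[i][0] != 0; i++)
			if ((insn & neon_arm_opcodes[i][0]) == neon_arm_opcodes[i][1])
				return true;	/* real code branches to do_vfp here */
		return false;
	}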
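On pre-v6 UP kernels the 32-bit __kuser_cmpxchg helper is not truly atomic; instead, kuser_cmpxchg32_fixup (reached via the kuser_cmpxchg_check macro in usr_entry) winds the saved user PC back to label 1: whenever an IRQ or data abort lands inside the 1:..2: critical window, so the load/compare/store sequence reruns from the top. The subs/rsbcss/strcs triple is a single unsigned range check; roughly the same policy in C (names are illustrative):

	#include <stdint.h>

	/* If the interrupted user PC lies inside [start, end], restart at
	 * start.  One unsigned comparison covers both bounds, matching the
	 * carry-flag trick the assembly uses. */
	static void kuser_cmpxchg_fixup(uint32_t *saved_pc, uint32_t start, uint32_t end)
	{
		if (*saved_pc - start <= end - start)
			*saved_pc = start;
	}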
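The removed comment block documented how user space calls the cmpxchg helper at the fixed address 0xffff0fc0; the formal description now lives in Documentation/arm/kernel_user_helpers.txt. A sketch of a retry loop built on it, reusing the typedef from that removed comment (the wrapper name is illustrative):

	/* Calling convention from the old comment / the kuser docs:
	 * returns 0 (and sets C) if *ptr was updated from oldval to newval,
	 * non-zero if no exchange happened. */
	typedef int (__kernel_cmpxchg_t)(int oldval, int newval, volatile int *ptr);
	#define __kernel_cmpxchg (*(__kernel_cmpxchg_t *)0xffff0fc0)

	/* Atomic add built by retrying until no other thread intervened. */
	static int user_atomic_add(volatile int *ptr, int val)
	{
		int old;

		do {
			old = *ptr;
		} while (__kernel_cmpxchg(old, old + val, ptr) != 0);

		return old + val;
	}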
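Similarly, __kuser_get_tls keeps its entry point at 0xffff0fe0 and the helper count stays readable at 0xffff0ffc, so user space can probe what the running kernel provides before jumping to a helper. A hedged sketch (the version threshold follows the usual kuser numbering, where get_tls has been available since helper version 1):

	typedef unsigned int (__kernel_get_tls_t)(void);
	#define __kernel_get_tls	(*(__kernel_get_tls_t *)0xffff0fe0)
	#define __kernel_helper_version	(*(unsigned int *)0xffff0ffc)

	/* Return the thread's TLS value, or 0 if the helper is unavailable. */
	static unsigned int get_tls_or_zero(void)
	{
		if (__kernel_helper_version >= 1)
			return __kernel_get_tls();
		return 0;
	}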