author    | Russell King <rmk+kernel@arm.linux.org.uk> | 2011-06-25 18:28:19 +0100
committer | Russell King <rmk+kernel@arm.linux.org.uk> | 2011-07-02 10:56:10 +0100
commit    | bc089602d206b2abc2d2e8e5324d90342cc0447b (patch)
tree      | 6f5f062109d2756b92eff8545531fb5f12df3929 /arch/arm/kernel/entry-armv.S
parent    | df295df6c391e322a06dea0d2bc3d22debd15fb9 (diff)
ARM: entry: instrument usr exception handlers with irqsoff tracing
As we no longer re-enable interrupts in these exception handlers, add
the irqsoff tracing calls to them so that the kernel tracks the state
more accurately.
Note that these calls are conditional on IRQSOFF_TRACER:
kernel ----------> user ---------> kernel
       ^ irqs enabled             ^ irqs disabled
No kernel code can run on the local CPU until we've re-entered the
kernel through one of the exception handlers, and userspace cannot
take any locks, etc. So the kernel doesn't care about the IRQ mask
state while userspace is running unless we're doing IRQ-off latency
tracing, and we can (and do) avoid the overhead of updating the IRQ
mask state on every kernel->user and user->kernel transition.
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
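For reference, the pattern this patch adds to each usr exception handler is sketched below; it is excerpted from the __dabt_usr hunk in the diff further down, with explanatory comments added by the editor. usr_entry, kuser_cmpxchg_check and dabt_helper are pre-existing macros in entry-armv.S.

	.align	5
__dabt_usr:
	usr_entry			@ save the interrupted user context on the kernel stack

#ifdef CONFIG_IRQSOFF_TRACER
	bl	trace_hardirqs_off	@ record, for the irqsoff tracer, that IRQs are masked
#endif

	kuser_cmpxchg_check		@ fix up an aborted kuser cmpxchg helper, if any
	dabt_helper			@ invoke the CPU-specific data abort handler

The same CONFIG_IRQSOFF_TRACER block is added to __und_usr and __pabt_usr; in __irq_usr, which already had it, kuser_cmpxchg_check is simply moved below the block so that trace_hardirqs_off is called first, matching the other handlers.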
Diffstat (limited to 'arch/arm/kernel/entry-armv.S')
-rw-r--r-- | arch/arm/kernel/entry-armv.S | 17
1 file changed, 16 insertions(+), 1 deletion(-)
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index 920dd3d0795..f863ee79093 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -421,6 +421,11 @@ ENDPROC(__pabt_svc)
 	.align	5
 __dabt_usr:
 	usr_entry
+
+#ifdef CONFIG_IRQSOFF_TRACER
+	bl	trace_hardirqs_off
+#endif
+
 	kuser_cmpxchg_check
 	dabt_helper
 
@@ -433,12 +438,12 @@ ENDPROC(__dabt_usr)
 	.align	5
 __irq_usr:
 	usr_entry
-	kuser_cmpxchg_check
 
 #ifdef CONFIG_IRQSOFF_TRACER
 	bl	trace_hardirqs_off
 #endif
 
+	kuser_cmpxchg_check
 	irq_handler
 	get_thread_info tsk
 	mov	why, #0
@@ -451,6 +456,11 @@ ENDPROC(__irq_usr)
 	.align	5
 __und_usr:
 	usr_entry
+
+#ifdef CONFIG_IRQSOFF_TRACER
+	bl	trace_hardirqs_off
+#endif
+
 	mov	r2, r4
 	mov	r3, r5
 
@@ -669,6 +679,11 @@ ENDPROC(__und_usr_unknown)
 	.align	5
 __pabt_usr:
 	usr_entry
+
+#ifdef CONFIG_IRQSOFF_TRACER
+	bl	trace_hardirqs_off
+#endif
+
 	pabt_helper
 	mov	r2, sp				@ regs
 	bl	do_PrefetchAbort		@ call abort handler