author | Uwe Kleine-König <u.kleine-koenig@pengutronix.de> | 2009-08-13 20:38:17 +0200
---|---|---
committer | Uwe Kleine-König <u.kleine-koenig@pengutronix.de> | 2009-08-13 20:34:37 +0200
commit | 0d928b0b616d1c5c5fe76019a87cba171ca91633 (patch) |
tree | db71283925be4df3ea3cf66f9a3eab5f4f349a06 /arch/arm |
parent | 181f817eaaca4c1f8a9c265d339d2b96de8b245d (diff) |
Complete irq tracing support for ARM
Before this patch, enabling and disabling irqs in assembler code and by
the hardware wasn't tracked completely.
I had to transpose two instructions in arch/arm/lib/bitops.h because
restore_irqs doesn't preserve the condition flags with CONFIG_TRACE_IRQFLAGS=y
(see the sketch below).
Signed-off-by: Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
Cc: Russell King <linux@arm.linux.org.uk>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Ingo Molnar <mingo@redhat.com>
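Why the transposition was needed, concretely: the traced restore_irqs (added in the assembler.h hunk below) now starts with a tst on the saved CPSR, which overwrites the condition flags, while the old restore_irqs was a bare msr that left them alone. A minimal before/after sketch, condensed from the arch/arm/lib/bitops.h hunk in this patch (the surrounding bitop macro is elided):

```
@ Old order: restore_irqs was just "msr cpsr_c, \oldcpsr", which leaves
@ the condition flags intact, so moveq could safely follow it.
	restore_irqs	ip		@ flags still hold the bitop's tst result
	moveq	r0, #0			@ Z flag from "tst r2, r0, lsl r3"

@ New order: the traced restore_irqs begins with "tst \oldcpsr, #PSR_I_BIT"
@ (to call trace_hardirqs_on only if the saved CPSR had irqs enabled),
@ which clobbers Z, so the flag-dependent move must come first.
	moveq	r0, #0			@ consume the bitop's flags first...
	restore_irqs	ip		@ ...then restore (and trace) the IRQ state
```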
Diffstat (limited to 'arch/arm')
-rw-r--r-- | arch/arm/include/asm/assembler.h | 49
-rw-r--r-- | arch/arm/kernel/entry-armv.S | 10
-rw-r--r-- | arch/arm/lib/bitops.h | 2
3 files changed, 49 insertions, 12 deletions
diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
index 15f8a092b70..44912cd5da1 100644
--- a/arch/arm/include/asm/assembler.h
+++ b/arch/arm/include/asm/assembler.h
@@ -74,23 +74,56 @@
  * Enable and disable interrupts
  */
 #if __LINUX_ARM_ARCH__ >= 6
-	.macro	disable_irq
+	.macro	disable_irq_notrace
 	cpsid	i
 	.endm
 
-	.macro	enable_irq
+	.macro	enable_irq_notrace
 	cpsie	i
 	.endm
 #else
-	.macro	disable_irq
+	.macro	disable_irq_notrace
 	msr	cpsr_c, #PSR_I_BIT | SVC_MODE
 	.endm
 
-	.macro	enable_irq
+	.macro	enable_irq_notrace
 	msr	cpsr_c, #SVC_MODE
 	.endm
 #endif
 
+	.macro asm_trace_hardirqs_off
+#if defined(CONFIG_TRACE_IRQFLAGS)
+	stmdb   sp!, {r0-r3, ip, lr}
+	bl	trace_hardirqs_off
+	ldmia	sp!, {r0-r3, ip, lr}
+#endif
+	.endm
+
+	.macro asm_trace_hardirqs_on_cond, cond
+#if defined(CONFIG_TRACE_IRQFLAGS)
+	/*
+	 * actually the registers should be pushed and pop'd conditionally, but
+	 * after bl the flags are certainly clobbered
+	 */
+	stmdb   sp!, {r0-r3, ip, lr}
+	bl\cond	trace_hardirqs_on
+	ldmia	sp!, {r0-r3, ip, lr}
+#endif
+	.endm
+
+	.macro asm_trace_hardirqs_on
+	asm_trace_hardirqs_on_cond al
+	.endm
+
+	.macro disable_irq
+	disable_irq_notrace
+	asm_trace_hardirqs_off
+	.endm
+
+	.macro enable_irq
+	asm_trace_hardirqs_on
+	enable_irq_notrace
+	.endm
 /*
  * Save the current IRQ state and disable IRQs.  Note that this macro
  * assumes FIQs are enabled, and that the processor is in SVC mode.
@@ -104,10 +137,16 @@
  * Restore interrupt state previously stored in a register.  We don't
  * guarantee that this will preserve the flags.
  */
-	.macro	restore_irqs, oldcpsr
+	.macro	restore_irqs_notrace, oldcpsr
 	msr	cpsr_c, \oldcpsr
 	.endm
 
+	.macro restore_irqs, oldcpsr
+	tst	\oldcpsr, #PSR_I_BIT
+	asm_trace_hardirqs_on_cond eq
+	restore_irqs_notrace \oldcpsr
+	.endm
+
 #define USER(x...)				\
 9999:	x;					\
 	.section __ex_table,"a";	\
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index fc8af43c500..792abd0dfae 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -151,6 +151,8 @@ ENDPROC(__und_invalid)
 	@  r4 - orig_r0 (see pt_regs definition in ptrace.h)
 	@
 	stmia	r5, {r0 - r4}
+
+	asm_trace_hardirqs_off
 	.endm
 
 	.align	5
@@ -206,9 +208,6 @@ ENDPROC(__dabt_svc)
 __irq_svc:
 	svc_entry
 
-#ifdef CONFIG_TRACE_IRQFLAGS
-	bl	trace_hardirqs_off
-#endif
 #ifdef CONFIG_PREEMPT
 	get_thread_info tsk
 	ldr	r8, [tsk, #TI_PREEMPT]		@ get preempt count
@@ -383,6 +382,8 @@ ENDPROC(__pabt_svc)
 	@ Clear FP to mark the first stack frame
 	@
 	zero_fp
+
+	asm_trace_hardirqs_off
 	.endm
 
 	.macro	kuser_cmpxchg_check
@@ -437,9 +438,6 @@ __irq_usr:
 	usr_entry
 	kuser_cmpxchg_check
 
-#ifdef CONFIG_TRACE_IRQFLAGS
-	bl	trace_hardirqs_off
-#endif
 	get_thread_info tsk
#ifdef CONFIG_PREEMPT
 	ldr	r8, [tsk, #TI_PREEMPT]		@ get preempt count
diff --git a/arch/arm/lib/bitops.h b/arch/arm/lib/bitops.h
index c7f2627385e..d42252918bf 100644
--- a/arch/arm/lib/bitops.h
+++ b/arch/arm/lib/bitops.h
@@ -60,8 +60,8 @@
 	tst	r2, r0, lsl r3
 	\instr	r2, r2, r0, lsl r3
 	\store	r2, [r1]
-	restore_irqs	ip
 	moveq	r0, #0
+	restore_irqs	ip
 	mov	pc, lr
 	.endm
 #endif
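A note on the stmdb/ldmia pairs in asm_trace_hardirqs_off and asm_trace_hardirqs_on_cond above: trace_hardirqs_off and trace_hardirqs_on are C functions, so a bl to them may clobber exactly the AAPCS caller-saved registers r0-r3, ip and lr, which is what the macros spill and reload. As a rough sketch (illustrative only, not part of the patch), enable_irq on ARMv6+ with CONFIG_TRACE_IRQFLAGS=y now expands to approximately:

```
	stmdb	sp!, {r0-r3, ip, lr}	@ save caller-clobbered registers
	bl	trace_hardirqs_on	@ tell lockdep irqs are about to be enabled
	ldmia	sp!, {r0-r3, ip, lr}	@ restore them
	cpsie	i			@ enable_irq_notrace: the actual unmask
```

Note that the wrappers keep the tracing call inside the irqs-masked window in both directions: disable_irq masks first and traces afterwards, while enable_irq traces first and only unmasks afterwards.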