55 files changed, 3173 insertions, 2844 deletions
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 9d260836339..2e86b8f6210 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -14,6 +14,7 @@ config ARM
         select HAVE_FUNCTION_TRACER if (!XIP_KERNEL)
         select HAVE_FTRACE_MCOUNT_RECORD if (!XIP_KERNEL)
         select HAVE_DYNAMIC_FTRACE if (!XIP_KERNEL)
+        select HAVE_FUNCTION_GRAPH_TRACER if (!THUMB2_KERNEL)
         select HAVE_GENERIC_DMA_COHERENT
         select HAVE_KERNEL_GZIP
         select HAVE_KERNEL_LZO
@@ -38,6 +39,9 @@ config HAVE_PWM
 config SYS_SUPPORTS_APM_EMULATION
         bool
 
+config HAVE_SCHED_CLOCK
+        bool
+
 config GENERIC_GPIO
         bool
 
@@ -233,6 +237,7 @@ config ARCH_REALVIEW
         bool "ARM Ltd. RealView family"
         select ARM_AMBA
         select COMMON_CLKDEV
+        select HAVE_SCHED_CLOCK
         select ICST
         select GENERIC_CLOCKEVENTS
         select ARCH_WANT_OPTIONAL_GPIOLIB
@@ -247,6 +252,7 @@ config ARCH_VERSATILE
         select ARM_AMBA
         select ARM_VIC
         select COMMON_CLKDEV
+        select HAVE_SCHED_CLOCK
         select ICST
         select GENERIC_CLOCKEVENTS
         select ARCH_WANT_OPTIONAL_GPIOLIB
@@ -263,6 +269,7 @@ config ARCH_VEXPRESS
         select COMMON_CLKDEV
         select GENERIC_CLOCKEVENTS
         select HAVE_CLK
+        select HAVE_SCHED_CLOCK
         select ICST
         select PLAT_VERSATILE
         help
@@ -434,6 +441,7 @@ config ARCH_IXP4XX
         select CPU_XSCALE
         select GENERIC_GPIO
         select GENERIC_CLOCKEVENTS
+        select HAVE_SCHED_CLOCK
         select DMABOUNCE if PCI
         help
           Support for Intel's IXP4XX (XScale) family of processors.
@@ -509,6 +517,7 @@ config ARCH_MMP
         select ARCH_REQUIRE_GPIOLIB
         select COMMON_CLKDEV
         select GENERIC_CLOCKEVENTS
+        select HAVE_SCHED_CLOCK
         select TICK_ONESHOT
         select PLAT_PXA
         select SPARSE_IRQ
@@ -565,6 +574,7 @@ config ARCH_TEGRA
         select GENERIC_CLOCKEVENTS
         select GENERIC_GPIO
         select HAVE_CLK
+        select HAVE_SCHED_CLOCK
         select COMMON_CLKDEV
         select ARCH_HAS_BARRIERS if CACHE_L2X0
         select ARCH_HAS_CPUFREQ
@@ -588,6 +598,7 @@ config ARCH_PXA
         select COMMON_CLKDEV
         select ARCH_REQUIRE_GPIOLIB
         select GENERIC_CLOCKEVENTS
+        select HAVE_SCHED_CLOCK
         select TICK_ONESHOT
         select PLAT_PXA
         select SPARSE_IRQ
@@ -636,6 +647,7 @@ config ARCH_SA1100
         select CPU_FREQ
         select GENERIC_CLOCKEVENTS
         select HAVE_CLK
+        select HAVE_SCHED_CLOCK
         select TICK_ONESHOT
         select ARCH_REQUIRE_GPIOLIB
         help
@@ -782,6 +794,7 @@ config ARCH_U300
         bool "ST-Ericsson U300 Series"
         depends on MMU
         select CPU_ARM926T
+        select HAVE_SCHED_CLOCK
         select HAVE_TCM
         select ARM_AMBA
         select ARM_VIC
@@ -830,6 +843,7 @@ config ARCH_OMAP
         select ARCH_REQUIRE_GPIOLIB
         select ARCH_HAS_CPUFREQ
         select GENERIC_CLOCKEVENTS
+        select HAVE_SCHED_CLOCK
         select ARCH_HAS_HOLES_MEMORYMODEL
         help
           Support for TI's OMAP platform (OMAP1/2/3/4).
@@ -983,9 +997,11 @@ config ARCH_ACORN
 config PLAT_IOP
         bool
         select GENERIC_CLOCKEVENTS
+        select HAVE_SCHED_CLOCK
 
 config PLAT_ORION
         bool
+        select HAVE_SCHED_CLOCK
 
 config PLAT_PXA
         bool
@@ -1212,10 +1228,11 @@ config SMP
         depends on EXPERIMENTAL
         depends on GENERIC_CLOCKEVENTS
         depends on REALVIEW_EB_ARM11MP || REALVIEW_EB_A9MP || \
-                 MACH_REALVIEW_PB11MP || MACH_REALVIEW_PBX || ARCH_OMAP4 ||\
-                 ARCH_S5PV310 || ARCH_TEGRA || ARCH_U8500 || ARCH_VEXPRESS_CA9X4
+                 MACH_REALVIEW_PB11MP || MACH_REALVIEW_PBX || ARCH_OMAP4 || \
+                 ARCH_S5PV310 || ARCH_TEGRA || ARCH_U8500 || ARCH_VEXPRESS_CA9X4 || \
+                 ARCH_MSM_SCORPIONMP
         select USE_GENERIC_SMP_HELPERS
-        select HAVE_ARM_SCU
+        select HAVE_ARM_SCU if !ARCH_MSM_SCORPIONMP
         help
           This enables support for systems with more than one CPU. If you have
           a system with only one CPU, like most personal computers, say N. If
@@ -1290,6 +1307,7 @@ config NR_CPUS
 config HOTPLUG_CPU
         bool "Support for hot-pluggable CPUs (EXPERIMENTAL)"
         depends on SMP && HOTPLUG && EXPERIMENTAL
+        depends on !ARCH_MSM
         help
           Say Y here to experiment with turning CPUs off and on.  CPUs
           can be controlled through /sys/devices/system/cpu.
@@ -1298,7 +1316,7 @@ config LOCAL_TIMERS
         bool "Use local timer interrupts"
         depends on SMP
         default y
-        select HAVE_ARM_TWD
+        select HAVE_ARM_TWD if !ARCH_MSM_SCORPIONMP
         help
           Enable support for local timers on SMP platforms, rather then the
           legacy IPI broadcast method.  Local timers allows the system
diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug
index 2fd0b99afc4..eac62085f5b 100644
--- a/arch/arm/Kconfig.debug
+++ b/arch/arm/Kconfig.debug
@@ -23,7 +23,7 @@ config STRICT_DEVMEM
 config FRAME_POINTER
         bool
         depends on !THUMB2_KERNEL
-        default y if !ARM_UNWIND
+        default y if !ARM_UNWIND || FUNCTION_GRAPH_TRACER
         help
           If you say N here, the resulting kernel will be slightly smaller and
           faster. However, if neither FRAME_POINTER nor ARM_UNWIND are enabled,
diff --git a/arch/arm/common/timer-sp.c b/arch/arm/common/timer-sp.c
index 4740313daa5..6ef3342153b 100644
--- a/arch/arm/common/timer-sp.c
+++ b/arch/arm/common/timer-sp.c
@@ -44,7 +44,6 @@ static struct clocksource clocksource_sp804 = {
         .rating         = 200,
         .read           = sp804_read,
         .mask           = CLOCKSOURCE_MASK(32),
-        .shift          = 20,
         .flags          = CLOCK_SOURCE_IS_CONTINUOUS,
 };
 
@@ -61,8 +60,7 @@ void __init sp804_clocksource_init(void __iomem *base)
         writel(TIMER_CTRL_32BIT | TIMER_CTRL_ENABLE | TIMER_CTRL_PERIODIC,
                 clksrc_base + TIMER_CTRL);
 
-        cs->mult = clocksource_khz2mult(TIMER_FREQ_KHZ, cs->shift);
-        clocksource_register(cs);
+        clocksource_register_khz(cs, TIMER_FREQ_KHZ);
 }
diff --git a/arch/arm/include/asm/sched_clock.h b/arch/arm/include/asm/sched_clock.h
new file mode 100644
index 00000000000..a84628be1a7
--- /dev/null
+++ b/arch/arm/include/asm/sched_clock.h
@@ -0,0 +1,118 @@
+/*
+ * sched_clock.h: support for extending counters to full 64-bit ns counter
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef ASM_SCHED_CLOCK
+#define ASM_SCHED_CLOCK
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+
+struct clock_data {
+        u64 epoch_ns;
+        u32 epoch_cyc;
+        u32 epoch_cyc_copy;
+        u32 mult;
+        u32 shift;
+};
+
+#define DEFINE_CLOCK_DATA(name) struct clock_data name
+
+static inline u64 cyc_to_ns(u64 cyc, u32 mult, u32 shift)
+{
+        return (cyc * mult) >> shift;
+}
+
+/*
+ * Atomically update the sched_clock epoch. Your update callback will
+ * be called from a timer before the counter wraps - read the current
+ * counter value, and call this function to safely move the epochs
+ * forward. Only use this from the update callback.
+ */
+static inline void update_sched_clock(struct clock_data *cd, u32 cyc, u32 mask)
+{
+        unsigned long flags;
+        u64 ns = cd->epoch_ns +
+                cyc_to_ns((cyc - cd->epoch_cyc) & mask, cd->mult, cd->shift);
+
+        /*
+         * Write epoch_cyc and epoch_ns in a way that the update is
+         * detectable in cyc_to_fixed_sched_clock().
+         */
+        raw_local_irq_save(flags);
+        cd->epoch_cyc = cyc;
+        smp_wmb();
+        cd->epoch_ns = ns;
+        smp_wmb();
+        cd->epoch_cyc_copy = cyc;
+        raw_local_irq_restore(flags);
+}
+
+/*
+ * If your clock rate is known at compile time, using this will allow
+ * you to optimize the mult/shift loads away. This is paired with
+ * init_fixed_sched_clock() to ensure that your mult/shift are correct.
+ */
+static inline unsigned long long cyc_to_fixed_sched_clock(struct clock_data *cd,
+        u32 cyc, u32 mask, u32 mult, u32 shift)
+{
+        u64 epoch_ns;
+        u32 epoch_cyc;
+
+        /*
+         * Load the epoch_cyc and epoch_ns atomically. We do this by
+         * ensuring that we always write epoch_cyc, epoch_ns and
+         * epoch_cyc_copy in strict order, and read them in strict order.
+         * If epoch_cyc and epoch_cyc_copy are not equal, then we're in
+         * the middle of an update, and we should repeat the load.
+         */
+        do {
+                epoch_cyc = cd->epoch_cyc;
+                smp_rmb();
+                epoch_ns = cd->epoch_ns;
+                smp_rmb();
+        } while (epoch_cyc != cd->epoch_cyc_copy);
+
+        return epoch_ns + cyc_to_ns((cyc - epoch_cyc) & mask, mult, shift);
+}
+
+/*
+ * Otherwise, you need to use this, which will obtain the mult/shift
+ * from the clock_data structure. Use init_sched_clock() with this.
+ */
+static inline unsigned long long cyc_to_sched_clock(struct clock_data *cd,
+        u32 cyc, u32 mask)
+{
+        return cyc_to_fixed_sched_clock(cd, cyc, mask, cd->mult, cd->shift);
+}
+
+/*
+ * Initialize the clock data - calculate the appropriate multiplier
+ * and shift. Also setup a timer to ensure that the epoch is refreshed
+ * at the appropriate time interval, which will call your update
+ * handler.
+ */
+void init_sched_clock(struct clock_data *, void (*)(void),
+        unsigned int, unsigned long);
+
+/*
+ * Use this initialization function rather than init_sched_clock() if
+ * you're using cyc_to_fixed_sched_clock, which will warn if your
+ * constants are incorrect.
+ */
+static inline void init_fixed_sched_clock(struct clock_data *cd,
+        void (*update)(void), unsigned int bits, unsigned long rate,
+        u32 mult, u32 shift)
+{
+        init_sched_clock(cd, update, bits, rate);
+        if (cd->mult != mult || cd->shift != shift) {
+                pr_crit("sched_clock: wrong multiply/shift: %u>>%u vs calculated %u>>%u\n"
+                        "sched_clock: fix multiply/shift to avoid scheduler hiccups\n",
+                        mult, shift, cd->mult, cd->shift);
+        }
+}
+
+#endif
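A note on the header above: everything hinges on cyc_to_ns() plus the epoch being re-anchored before the raw counter can wrap. The following standalone C sketch shows the same arithmetic in userspace; it is single-threaded, so the epoch_cyc_copy/smp_wmb() protocol is deliberately omitted, and the 16-bit counter width and 1 MHz rate are made-up values for illustration only:

    #include <stdint.h>
    #include <stdio.h>

    #define MASK 0xffffu            /* hypothetical 16-bit counter */

    static uint64_t epoch_ns;       /* ns value at the last epoch update */
    static uint32_t epoch_cyc;      /* counter value at the last update */

    /* same role as cyc_to_ns() above; mult=1000, shift=0 models 1 MHz */
    static uint64_t cyc_to_ns(uint64_t cyc, uint32_t mult, uint32_t shift)
    {
            return (cyc * mult) >> shift;
    }

    /* extend the counter: ns since epoch, wrap handled by the mask */
    static uint64_t sched_clock_sketch(uint32_t cyc)
    {
            return epoch_ns + cyc_to_ns((cyc - epoch_cyc) & MASK, 1000, 0);
    }

    /* must run at least once per counter wrap (65.5 ms at 1 MHz) */
    static void update_epoch(uint32_t cyc)
    {
            epoch_ns = sched_clock_sketch(cyc);
            epoch_cyc = cyc;
    }

    int main(void)
    {
            update_epoch(0x1234);   /* pretend counter read from a timer */
            printf("%llu ns\n",
                   (unsigned long long)sched_clock_sketch(0x5678));
            return 0;
    }

The kernel version needs the triple-write (epoch_cyc, epoch_ns, epoch_cyc_copy) with barriers precisely because readers run concurrently with the timer-driven update and must detect a torn epoch.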
diff --git a/arch/arm/include/asm/system.h b/arch/arm/include/asm/system.h
index 80025948b8a..3222ab8b344 100644
--- a/arch/arm/include/asm/system.h
+++ b/arch/arm/include/asm/system.h
@@ -63,6 +63,11 @@
 #include <asm/outercache.h>
 
 #define __exception __attribute__((section(".exception.text")))
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+#define __exception_irq_entry __irq_entry
+#else
+#define __exception_irq_entry __exception
+#endif
 
 struct thread_info;
 struct task_struct;
diff --git a/arch/arm/include/asm/traps.h b/arch/arm/include/asm/traps.h
index 491960bf426..124475afb00 100644
--- a/arch/arm/include/asm/traps.h
+++ b/arch/arm/include/asm/traps.h
@@ -15,13 +15,32 @@ struct undef_hook {
 void register_undef_hook(struct undef_hook *hook);
 void unregister_undef_hook(struct undef_hook *hook);
 
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+static inline int __in_irqentry_text(unsigned long ptr)
+{
+        extern char __irqentry_text_start[];
+        extern char __irqentry_text_end[];
+
+        return ptr >= (unsigned long)&__irqentry_text_start &&
+               ptr < (unsigned long)&__irqentry_text_end;
+}
+#else
+static inline int __in_irqentry_text(unsigned long ptr)
+{
+        return 0;
+}
+#endif
+
 static inline int in_exception_text(unsigned long ptr)
 {
         extern char __exception_text_start[];
         extern char __exception_text_end[];
+        int in;
+
+        in = ptr >= (unsigned long)&__exception_text_start &&
+             ptr < (unsigned long)&__exception_text_end;
 
-        return ptr >= (unsigned long)&__exception_text_start &&
-               ptr < (unsigned long)&__exception_text_end;
+        return in ? : __in_irqentry_text(ptr);
 }
 
 extern void __init early_trap_init(void);
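One subtlety in the traps.h hunk: `return in ? : __in_irqentry_text(ptr);` uses the GNU C conditional with an omitted middle operand, where `a ?: b` means `a ? a : b` with `a` evaluated only once. A quick standalone illustration (compiles with gcc or clang; the function names are arbitrary):

    #include <stdio.h>

    /* GNU extension: `a ?: b` yields a when a is non-zero, else b */
    static int fallback(void)
    {
            puts("fallback evaluated");
            return 42;
    }

    int main(void)
    {
            printf("%d\n", 7 ?: fallback()); /* 7; fallback() never runs */
            printf("%d\n", 0 ?: fallback()); /* 42, after the message */
            return 0;
    }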
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index 5b9b268f4fb..fd3ec49bfba 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -5,7 +5,7 @@ CPPFLAGS_vmlinux.lds := -DTEXT_OFFSET=$(TEXT_OFFSET)
 AFLAGS_head.o        := -DTEXT_OFFSET=$(TEXT_OFFSET)
 
-ifdef CONFIG_DYNAMIC_FTRACE
+ifdef CONFIG_FUNCTION_TRACER
 CFLAGS_REMOVE_ftrace.o = -pg
 endif
 
@@ -29,10 +29,12 @@ obj-$(CONFIG_MODULES)           += armksyms.o module.o
 obj-$(CONFIG_ARTHUR)            += arthur.o
 obj-$(CONFIG_ISA_DMA)           += dma-isa.o
 obj-$(CONFIG_PCI)               += bios32.o isa.o
+obj-$(CONFIG_HAVE_SCHED_CLOCK)  += sched_clock.o
 obj-$(CONFIG_SMP)               += smp.o
 obj-$(CONFIG_HAVE_ARM_SCU)      += smp_scu.o
 obj-$(CONFIG_HAVE_ARM_TWD)      += smp_twd.o
 obj-$(CONFIG_DYNAMIC_FTRACE)    += ftrace.o
+obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o
 obj-$(CONFIG_KEXEC)             += machine_kexec.o relocate_kernel.o
 obj-$(CONFIG_KPROBES)           += kprobes.o kprobes-decode.o
 obj-$(CONFIG_ATAGS_PROC)        += atags.o
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index 80bf8cd88d7..1e7b04a40a3 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -147,98 +147,170 @@ ENDPROC(ret_from_fork)
 #endif
 #endif
 
-#ifdef CONFIG_DYNAMIC_FTRACE
-ENTRY(__gnu_mcount_nc)
-        mov ip, lr
-        ldmia sp!, {lr}
-        mov pc, ip
-ENDPROC(__gnu_mcount_nc)
+.macro __mcount suffix
+        mcount_enter
+        ldr r0, =ftrace_trace_function
+        ldr r2, [r0]
+        adr r0, .Lftrace_stub
+        cmp r0, r2
+        bne 1f
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+        ldr r1, =ftrace_graph_return
+        ldr r2, [r1]
+        cmp r0, r2
+        bne ftrace_graph_caller\suffix
+
+        ldr r1, =ftrace_graph_entry
+        ldr r2, [r1]
+        ldr r0, =ftrace_graph_entry_stub
+        cmp r0, r2
+        bne ftrace_graph_caller\suffix
+#endif
 
-ENTRY(ftrace_caller)
-        stmdb sp!, {r0-r3, lr}
-        mov r0, lr
+        mcount_exit
+
+1:      mcount_get_lr r1                @ lr of instrumented func
+        mov r0, lr                      @ instrumented function
+        sub r0, r0, #MCOUNT_INSN_SIZE
+        adr lr, BSYM(2f)
+        mov pc, r2
+2:      mcount_exit
+.endm
+
+.macro __ftrace_caller suffix
+        mcount_enter
+
+        mcount_get_lr r1                @ lr of instrumented func
+        mov r0, lr                      @ instrumented function
         sub r0, r0, #MCOUNT_INSN_SIZE
-        ldr r1, [sp, #20]
 
-        .global ftrace_call
-ftrace_call:
+        .globl ftrace_call\suffix
+ftrace_call\suffix:
         bl ftrace_stub
-        ldmia sp!, {r0-r3, ip, lr}
-        mov pc, ip
-ENDPROC(ftrace_caller)
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+        .globl ftrace_graph_call\suffix
+ftrace_graph_call\suffix:
+        mov r0, r0
+#endif
+
+        mcount_exit
+.endm
+
+.macro __ftrace_graph_caller
+        sub r0, fp, #4          @ &lr of instrumented routine (&parent)
+#ifdef CONFIG_DYNAMIC_FTRACE
+        @ called from __ftrace_caller, saved in mcount_enter
+        ldr r1, [sp, #16]       @ instrumented routine (func)
+#else
+        @ called from __mcount, untouched in lr
+        mov r1, lr              @ instrumented routine (func)
+#endif
+        sub r1, r1, #MCOUNT_INSN_SIZE
+        mov r2, fp              @ frame pointer
+        bl prepare_ftrace_return
+        mcount_exit
+.endm
 
 #ifdef CONFIG_OLD_MCOUNT
+/*
+ * mcount
+ */
+
+.macro mcount_enter
+        stmdb sp!, {r0-r3, lr}
+.endm
+
+.macro mcount_get_lr reg
+        ldr \reg, [fp, #-4]
+.endm
+
+.macro mcount_exit
+        ldr lr, [fp, #-4]
+        ldmia sp!, {r0-r3, pc}
+.endm
+
 ENTRY(mcount)
+#ifdef CONFIG_DYNAMIC_FTRACE
         stmdb sp!, {lr}
         ldr lr, [fp, #-4]
         ldmia sp!, {pc}
+#else
+        __mcount _old
+#endif
 ENDPROC(mcount)
 
+#ifdef CONFIG_DYNAMIC_FTRACE
 ENTRY(ftrace_caller_old)
-        stmdb sp!, {r0-r3, lr}
-        ldr r1, [fp, #-4]
-        mov r0, lr
-        sub r0, r0, #MCOUNT_INSN_SIZE
-
-        .globl ftrace_call_old
-ftrace_call_old:
-        bl ftrace_stub
-        ldr lr, [fp, #-4]       @ restore lr
-        ldmia sp!, {r0-r3, pc}
+        __ftrace_caller _old
 ENDPROC(ftrace_caller_old)
 #endif
 
-#else
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ENTRY(ftrace_graph_caller_old)
+        __ftrace_graph_caller
+ENDPROC(ftrace_graph_caller_old)
+#endif
 
-ENTRY(__gnu_mcount_nc)
+.purgem mcount_enter
+.purgem mcount_get_lr
+.purgem mcount_exit
+#endif
+
+/*
+ * __gnu_mcount_nc
+ */
+
+.macro mcount_enter
         stmdb sp!, {r0-r3, lr}
-        ldr r0, =ftrace_trace_function
-        ldr r2, [r0]
-        adr r0, .Lftrace_stub
-        cmp r0, r2
-        bne gnu_trace
+.endm
+
+.macro mcount_get_lr reg
+        ldr \reg, [sp, #20]
+.endm
+
+.macro mcount_exit
         ldmia sp!, {r0-r3, ip, lr}
         mov pc, ip
+.endm
 
-gnu_trace:
-        ldr r1, [sp, #20]       @ lr of instrumented routine
-        mov r0, lr
-        sub r0, r0, #MCOUNT_INSN_SIZE
-        adr lr, BSYM(1f)
-        mov pc, r2
-1:
-        ldmia sp!, {r0-r3, ip, lr}
+ENTRY(__gnu_mcount_nc)
+#ifdef CONFIG_DYNAMIC_FTRACE
+        mov ip, lr
+        ldmia sp!, {lr}
         mov pc, ip
+#else
+        __mcount
+#endif
 ENDPROC(__gnu_mcount_nc)
 
-#ifdef CONFIG_OLD_MCOUNT
-/*
- * This is under an ifdef in order to force link-time errors for people trying
- * to build with !FRAME_POINTER with a GCC which doesn't use the new-style
- * mcount.
- */
-ENTRY(mcount)
-        stmdb sp!, {r0-r3, lr}
-        ldr r0, =ftrace_trace_function
-        ldr r2, [r0]
-        adr r0, ftrace_stub
-        cmp r0, r2
-        bne trace
-        ldr lr, [fp, #-4]       @ restore lr
-        ldmia sp!, {r0-r3, pc}
+#ifdef CONFIG_DYNAMIC_FTRACE
+ENTRY(ftrace_caller)
+        __ftrace_caller
+ENDPROC(ftrace_caller)
+#endif
 
-trace:
-        ldr r1, [fp, #-4]       @ lr of instrumented routine
-        mov r0, lr
-        sub r0, r0, #MCOUNT_INSN_SIZE
-        mov lr, pc
-        mov pc, r2
-        ldr lr, [fp, #-4]       @ restore lr
-        ldmia sp!, {r0-r3, pc}
-ENDPROC(mcount)
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ENTRY(ftrace_graph_caller)
+        __ftrace_graph_caller
+ENDPROC(ftrace_graph_caller)
 #endif
 
-#endif /* CONFIG_DYNAMIC_FTRACE */
+.purgem mcount_enter
+.purgem mcount_get_lr
+.purgem mcount_exit
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+        .globl return_to_handler
+return_to_handler:
+        stmdb sp!, {r0-r3}
+        mov r0, fp              @ frame pointer
+        bl ftrace_return_to_handler
+        mov lr, r0              @ r0 has real ret addr
+        ldmia sp!, {r0-r3}
+        mov pc, lr
+#endif
 
 ENTRY(ftrace_stub)
.Lftrace_stub:
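The trampoline rework above pairs with prepare_ftrace_return() in the ftrace.c diff below: ftrace_graph_caller passes the address of the instrumented function's saved return address (the `&lr` slot that `sub r0, fp, #4` computes), the real return address is parked on a per-task stack, and return_to_handler later pops it back so the traced function still returns to its real caller. A rough standalone C model of that bookkeeping (the fixed-size array and all names here are illustrative, not the kernel's ret_stack implementation):

    #include <stdio.h>

    /* Illustrative model of the graph tracer's return-address hijack:
     * the real return address is pushed here on function entry and
     * popped on exit, so the tracer observes both edges of the call.
     * No depth checking - a real implementation must bound this. */
    static unsigned long ret_stack[64];
    static int depth = -1;

    /* entry side: called with the address of the saved return address */
    static void prepare_return(unsigned long *parent, unsigned long hooker)
    {
            ret_stack[++depth] = *parent; /* park the real return address */
            *parent = hooker;             /* caller now "returns" to hook */
    }

    /* exit side: the hook asks where execution should really resume */
    static unsigned long pop_return(void)
    {
            return ret_stack[depth--];
    }

    int main(void)
    {
            unsigned long saved_lr = 0x8000; /* pretend stack slot with lr */
            prepare_return(&saved_lr, 0xdead);
            printf("patched lr: %#lx, real lr: %#lx\n",
                   saved_lr, pop_return());
            return 0;
    }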
diff --git a/arch/arm/kernel/ftrace.c b/arch/arm/kernel/ftrace.c
index 971ac8c36ea..c0062ad1e84 100644
--- a/arch/arm/kernel/ftrace.c
+++ b/arch/arm/kernel/ftrace.c
@@ -24,6 +24,7 @@
 #define NOP             0xe8bd4000      /* pop {lr} */
 #endif
 
+#ifdef CONFIG_DYNAMIC_FTRACE
 #ifdef CONFIG_OLD_MCOUNT
 #define OLD_MCOUNT_ADDR ((unsigned long) mcount)
 #define OLD_FTRACE_ADDR ((unsigned long) ftrace_caller_old)
@@ -59,9 +60,9 @@ static unsigned long adjust_address(struct dyn_ftrace *rec, unsigned long addr)
 }
 #endif
 
-/* construct a branch (BL) instruction to addr */
 #ifdef CONFIG_THUMB2_KERNEL
-static unsigned long ftrace_call_replace(unsigned long pc, unsigned long addr)
+static unsigned long ftrace_gen_branch(unsigned long pc, unsigned long addr,
+                                       bool link)
 {
         unsigned long s, j1, j2, i1, i2, imm10, imm11;
         unsigned long first, second;
@@ -83,15 +84,22 @@ static unsigned long ftrace_call_replace(unsigned long pc, unsigned long addr)
         j2 = (!i2) ^ s;
 
         first = 0xf000 | (s << 10) | imm10;
-        second = 0xd000 | (j1 << 13) | (j2 << 11) | imm11;
+        second = 0x9000 | (j1 << 13) | (j2 << 11) | imm11;
+        if (link)
+                second |= 1 << 14;
 
         return (second << 16) | first;
 }
 #else
-static unsigned long ftrace_call_replace(unsigned long pc, unsigned long addr)
+static unsigned long ftrace_gen_branch(unsigned long pc, unsigned long addr,
+                                       bool link)
 {
+        unsigned long opcode = 0xea000000;
         long offset;
 
+        if (link)
+                opcode |= 1 << 24;
+
         offset = (long)addr - (long)(pc + 8);
         if (unlikely(offset < -33554432 || offset > 33554428)) {
                 /* Can't generate branches that far (from ARM ARM). Ftrace
@@ -103,10 +111,15 @@ static unsigned long ftrace_call_replace(unsigned long pc, unsigned long addr)
 
         offset = (offset >> 2) & 0x00ffffff;
 
-        return 0xeb000000 | offset;
+        return opcode | offset;
 }
 #endif
 
+static unsigned long ftrace_call_replace(unsigned long pc, unsigned long addr)
+{
+        return ftrace_gen_branch(pc, addr, true);
+}
+
 static int ftrace_modify_code(unsigned long pc, unsigned long old,
                               unsigned long new)
 {
@@ -193,3 +206,83 @@ int __init ftrace_dyn_arch_init(void *data)
 
         return 0;
 }
+#endif /* CONFIG_DYNAMIC_FTRACE */
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
+                           unsigned long frame_pointer)
+{
+        unsigned long return_hooker = (unsigned long) &return_to_handler;
+        struct ftrace_graph_ent trace;
+        unsigned long old;
+        int err;
+
+        if (unlikely(atomic_read(&current->tracing_graph_pause)))
+                return;
+
+        old = *parent;
+        *parent = return_hooker;
+
+        err = ftrace_push_return_trace(old, self_addr, &trace.depth,
+                                       frame_pointer);
+        if (err == -EBUSY) {
+                *parent = old;
+                return;
+        }
+
+        trace.func = self_addr;
+
+        /* Only trace if the calling function expects to */
+        if (!ftrace_graph_entry(&trace)) {
+                current->curr_ret_stack--;
+                *parent = old;
+        }
+}
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+extern unsigned long ftrace_graph_call;
+extern unsigned long ftrace_graph_call_old;
+extern void ftrace_graph_caller_old(void);
+
+static int __ftrace_modify_caller(unsigned long *callsite,
+                                  void (*func) (void), bool enable)
+{
+        unsigned long caller_fn = (unsigned long) func;
+        unsigned long pc = (unsigned long)
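The two ftrace_gen_branch() encoders above are the core of the call-site patching: in ARM mode the branch target is encoded as a signed word offset from pc + 8 (two instructions of prefetch), must fall within roughly +/-32 MB, and lives in the low 24 bits of the instruction; bit 24 of the always-condition 0xea000000 (B) opcode selects BL. Here is a standalone C rendition of that ARM-mode arithmetic for experimenting outside the kernel (a sketch, not the kernel function; the example addresses are arbitrary):

    #include <stdint.h>
    #include <stdio.h>

    /* ARM-mode B/BL encoding, mirroring ftrace_gen_branch() above:
     * cond=AL (0xe), offset relative to pc+8 stored >>2 in the low
     * 24 bits; bit 24 turns B (0xea......) into BL (0xeb......). */
    static uint32_t gen_branch(uint32_t pc, uint32_t addr, int link)
    {
            uint32_t opcode = 0xea000000;
            int64_t offset = (int64_t)addr - (int64_t)pc - 8;

            if (link)
                    opcode |= 1 << 24;

            if (offset < -33554432 || offset > 33554428)
                    return 0; /* out of range: ~ +/-32 MB from pc+8 */

            return opcode | ((uint32_t)(offset >> 2) & 0x00ffffff);
    }

    int main(void)
    {
            /* BL from 0x8000 to 0x9000: offset 0xff8 >> 2 = 0x3fe */
            printf("%#010x\n", gen_branch(0x8000, 0x9000, 1)); /* 0xeb0003fe */
            return 0;
    }

The Thumb-2 variant in the hunk does the same job but scatters the offset across the two instruction halfwords (s/j1/j2/imm10/imm11), with bit 14 of the second halfword selecting BL, which is why it needs the extra bit shuffling.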