author     Linus Torvalds <torvalds@linux-foundation.org>  2010-04-13 11:34:05 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2010-04-13 11:34:05 -0700
commit     44d2d371d250b44cbe40f8d47e329c97668d7594 (patch)
tree       d93611beede5c45bdca2a850354ab416197f4f21
parent     465de2ba71f5048341e0109f3f6c4d7dc65d9754 (diff)
parent     c011f80ba0912486fe51dd2b3f71d9b33a151188 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc-2.6:
sparc64: Add some more commentary to __raw_local_irq_save()
sparc64: Fix memory leak in pci_register_iommu_region().
sparc64: Add kmemleak annotation to sun4v_build_virq()
sparc64: Support kmemleak.
sparc64: Add function graph tracer support.
sparc64: Give a stack frame to the ftrace call sites.
  sparc64: Use a separate counter for timer interrupts and NMI checks, like x86.
sparc64: Remove profiling from some low-level bits.
sparc64: Kill unnecessary static on local var in ftrace_call_replace().
sparc64: Kill CONFIG_STACK_DEBUG code.
sparc64: Add HAVE_FUNCTION_TRACE_MCOUNT_TEST and tidy up.
sparc64: Adjust __raw_local_irq_save() to cooperate in NMIs.
sparc64: Use kstack_valid() in die_if_kernel().
-rw-r--r--  arch/sparc/Kconfig                    |   3
-rw-r--r--  arch/sparc/Kconfig.debug              |   5
-rw-r--r--  arch/sparc/include/asm/cpudata_64.h   |   2
-rw-r--r--  arch/sparc/include/asm/irqflags_64.h  |  23
-rw-r--r--  arch/sparc/kernel/Makefile            |  10
-rw-r--r--  arch/sparc/kernel/ftrace.c            |  60
-rw-r--r--  arch/sparc/kernel/irq_64.c            |  11
-rw-r--r--  arch/sparc/kernel/kgdb_64.c           |   3
-rw-r--r--  arch/sparc/kernel/nmi.c               |   3
-rw-r--r--  arch/sparc/kernel/pci_common.c        |  11
-rw-r--r--  arch/sparc/kernel/pcr.c               |   3
-rw-r--r--  arch/sparc/kernel/smp_64.c            |  11
-rw-r--r--  arch/sparc/kernel/time_64.c           |   4
-rw-r--r--  arch/sparc/kernel/traps_64.c          |  26
-rw-r--r--  arch/sparc/kernel/vmlinux.lds.S       |   5
-rw-r--r--  arch/sparc/lib/mcount.S               | 159
-rw-r--r--  lib/Kconfig.debug                     |   2
17 files changed, 206 insertions, 135 deletions
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 6db51367405..9908d477ccd 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -37,6 +37,9 @@ config SPARC64
 	def_bool 64BIT
 	select ARCH_SUPPORTS_MSI
 	select HAVE_FUNCTION_TRACER
+	select HAVE_FUNCTION_GRAPH_TRACER
+	select HAVE_FUNCTION_GRAPH_FP_TEST
+	select HAVE_FUNCTION_TRACE_MCOUNT_TEST
 	select HAVE_KRETPROBES
 	select HAVE_KPROBES
 	select HAVE_LMB
diff --git a/arch/sparc/Kconfig.debug b/arch/sparc/Kconfig.debug
index 9d3c889718a..1b4a831565f 100644
--- a/arch/sparc/Kconfig.debug
+++ b/arch/sparc/Kconfig.debug
@@ -19,13 +19,10 @@ config DEBUG_DCFLUSH
 	bool "D-cache flush debugging"
 	depends on SPARC64 && DEBUG_KERNEL
 
-config STACK_DEBUG
-	bool "Stack Overflow Detection Support"
-
 config MCOUNT
 	bool
 	depends on SPARC64
-	depends on STACK_DEBUG || FUNCTION_TRACER
+	depends on FUNCTION_TRACER
 	default y
 
 config FRAME_POINTER
diff --git a/arch/sparc/include/asm/cpudata_64.h b/arch/sparc/include/asm/cpudata_64.h
index 926397d345f..050ef35b9dc 100644
--- a/arch/sparc/include/asm/cpudata_64.h
+++ b/arch/sparc/include/asm/cpudata_64.h
@@ -17,7 +17,7 @@ typedef struct {
 	unsigned int	__nmi_count;
 	unsigned long	clock_tick;	/* %tick's per second */
 	unsigned long	__pad;
-	unsigned int	__pad1;
+	unsigned int	irq0_irqs;
 	unsigned int	__pad2;
 
 	/* Dcache line 2, rarely used */
diff --git a/arch/sparc/include/asm/irqflags_64.h b/arch/sparc/include/asm/irqflags_64.h
index 8b49bf920df..bfa1ea45b4c 100644
--- a/arch/sparc/include/asm/irqflags_64.h
+++ b/arch/sparc/include/asm/irqflags_64.h
@@ -76,9 +76,26 @@ static inline int raw_irqs_disabled(void)
  */
 static inline unsigned long __raw_local_irq_save(void)
 {
-	unsigned long flags = __raw_local_save_flags();
-
-	raw_local_irq_disable();
+	unsigned long flags, tmp;
+
+	/* Disable interrupts to PIL_NORMAL_MAX unless we already
+	 * are using PIL_NMI, in which case PIL_NMI is retained.
+	 *
+	 * The only values we ever program into the %pil are 0,
+	 * PIL_NORMAL_MAX and PIL_NMI.
+	 *
+	 * Since PIL_NMI is the largest %pil value and all bits are
+	 * set in it (0xf), it doesn't matter what PIL_NORMAL_MAX
+	 * actually is.
+	 */
+	__asm__ __volatile__(
+		"rdpr	%%pil, %0\n\t"
+		"or	%0, %2, %1\n\t"
+		"wrpr	%1, 0x0, %%pil"
+		: "=r" (flags), "=r" (tmp)
+		: "i" (PIL_NORMAL_MAX)
+		: "memory"
+	);
 
 	return flags;
 }
diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
index c6316142db4..0c2dc1f24a9 100644
--- a/arch/sparc/kernel/Makefile
+++ b/arch/sparc/kernel/Makefile
@@ -13,6 +13,14 @@ extra-y += init_task.o
 CPPFLAGS_vmlinux.lds := -Usparc -m$(BITS)
 extra-y              += vmlinux.lds
 
+ifdef CONFIG_FUNCTION_TRACER
+# Do not profile debug and lowlevel utilities
+CFLAGS_REMOVE_ftrace.o := -pg
+CFLAGS_REMOVE_time_$(BITS).o := -pg
+CFLAGS_REMOVE_perf_event.o := -pg
+CFLAGS_REMOVE_pcr.o := -pg
+endif
+
 obj-$(CONFIG_SPARC32)   += entry.o wof.o wuf.o
 obj-$(CONFIG_SPARC32)   += etrap_32.o
 obj-$(CONFIG_SPARC32)   += rtrap_32.o
@@ -85,7 +93,7 @@ obj-$(CONFIG_KGDB) += kgdb_$(BITS).o
 
 obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o
-CFLAGS_REMOVE_ftrace.o := -pg
+obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o
 
 obj-$(CONFIG_EARLYFB) += btext.o
 
 obj-$(CONFIG_STACKTRACE)     += stacktrace.o
diff --git a/arch/sparc/kernel/ftrace.c b/arch/sparc/kernel/ftrace.c
index 9103a56b39e..03ab022e51c 100644
--- a/arch/sparc/kernel/ftrace.c
+++ b/arch/sparc/kernel/ftrace.c
@@ -13,7 +13,7 @@ static const u32 ftrace_nop = 0x01000000;
 
 static u32 ftrace_call_replace(unsigned long ip, unsigned long addr)
 {
-	static u32 call;
+	u32 call;
 	s32 off;
 
 	off = ((s32)addr - (s32)ip);
@@ -91,3 +91,61 @@ int __init ftrace_dyn_arch_init(void *data)
 	return 0;
 }
 #endif
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+extern void ftrace_graph_call(void);
+
+int ftrace_enable_ftrace_graph_caller(void)
+{
+	unsigned long ip = (unsigned long)(&ftrace_graph_call);
+	u32 old, new;
+
+	old = *(u32 *) &ftrace_graph_call;
+	new = ftrace_call_replace(ip, (unsigned long) &ftrace_graph_caller);
+	return ftrace_modify_code(ip, old, new);
+}
+
+int ftrace_disable_ftrace_graph_caller(void)
+{
+	unsigned long ip = (unsigned long)(&ftrace_graph_call);
+	u32 old, new;
+
+	old = *(u32 *) &ftrace_graph_call;
+	new = ftrace_call_replace(ip, (unsigned long) &ftrace_stub);
+
+	return ftrace_modify_code(ip, old, new);
+}
+
+#endif /* !CONFIG_DYNAMIC_FTRACE */
+
+/*
+ * Hook the return address and push it in the stack of return addrs
+ * in current thread info.
+ */
+unsigned long prepare_ftrace_return(unsigned long parent,
+				    unsigned long self_addr,
+				    unsigned long frame_pointer)
+{
+	unsigned long return_hooker = (unsigned long) &return_to_handler;
+	struct ftrace_graph_ent trace;
+
+	if (unlikely(atomic_read(&current->tracing_graph_pause)))
+		return parent + 8UL;
+
+	if (ftrace_push_return_trace(parent, self_addr, &trace.depth,
+				     frame_pointer) == -EBUSY)
+		return parent + 8UL;
+
+	trace.func = self_addr;
+
+	/* Only trace if the calling function expects to */
+	if (!ftrace_graph_entry(&trace)) {
+		current->curr_ret_stack--;
+		return parent + 8UL;
+	}
+
+	return return_hooker;
+}
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
diff --git a/arch/sparc/kernel/irq_64.c b/arch/sparc/kernel/irq_64.c
index e1cbdb94d97..454ce3a2527 100644
--- a/arch/sparc/kernel/irq_64.c
+++ b/arch/sparc/kernel/irq_64.c
@@ -20,6 +20,7 @@
 #include <linux/delay.h>
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
+#include <linux/ftrace.h>
 #include <linux/irq.h>
 
 #include <asm/ptrace.h>
@@ -647,6 +648,14 @@ unsigned int sun4v_build_virq(u32 devhandle, unsigned int devino)
 	bucket = kzalloc(sizeof(struct ino_bucket), GFP_ATOMIC);
 	if (unlikely(!bucket))
 		return 0;
+
+	/* The only reference we store to the IRQ bucket is
+	 * by physical address which kmemleak can't see, tell
+	 * it that this object explicitly is not a leak and
+	 * should be scanned.
+	 */
+	kmemleak_not_leak(bucket);
+
 	__flush_dcache_range((unsigned long) bucket,
 			     ((unsigned long) bucket +
 			      sizeof(struct ino_bucket)));
@@ -721,7 +730,7 @@ static __attribute__((always_inline)) void restore_hardirq_stack(void *orig_sp)
 	__asm__ __volatile__("mov %0, %%sp" : : "r" (orig_sp));
 }
 
-void handler_irq(int irq, struct pt_regs *regs)
+void __irq_entry handler_irq(int irq, struct pt_regs *regs)
 {
 	unsigned long pstate, bucket_pa;
 	struct pt_regs *old_regs;
diff --git a/arch/sparc/kernel/kgdb_64.c b/arch/sparc/kernel/kgdb_64.c
index f5a0fd490b5..0a2bd0f99fc 100644
--- a/arch/sparc/kernel/kgdb_64.c
+++ b/arch/sparc/kernel/kgdb_64.c
@@ -5,6 +5,7 @@
 
 #include <linux/kgdb.h>
 #include <linux/kdebug.h>
+#include <linux/ftrace.h>
 
 #include <asm/kdebug.h>
 #include <asm/ptrace.h>
@@ -108,7 +109,7 @@ void gdb_regs_to_pt_regs(unsigned long *gdb_regs, struct pt_regs *regs)
 }
 
 #ifdef CONFIG_SMP
-void smp_kgdb_capture_client(int irq, struct pt_regs *regs)
+void __irq_entry smp_kgdb_capture_client(int irq, struct pt_regs *regs)
 {
 	unsigned long flags;
 
diff --git a/arch/sparc/kernel/nmi.c b/arch/sparc/kernel/nmi.c
index b287b62c7ea..75a3d1a2535 100644
--- a/arch/sparc/kernel/nmi.c
+++ b/arch/sparc/kernel/nmi.c
@@ -92,7 +92,6 @@ static void die_nmi(const char *str, struct pt_regs *regs, int do_panic)
 notrace __kprobes void perfctr_irq(int irq, struct pt_regs *regs)
 {
 	unsigned int sum, touched = 0;
-	int cpu = smp_processor_id();
 
 	clear_softint(1 << irq);
 
@@ -106,7 +105,7 @@ notrace __kprobes void perfctr_irq(int irq, struct pt_regs *regs)
 	else
 		pcr_ops->write(PCR_PIC_PRIV);
 
-	sum = kstat_irqs_cpu(0, cpu);
+	sum = local_cpu_data().irq0_irqs;
 	if (__get_cpu_var(nmi_touch)) {
 		__get_cpu_var(nmi_touch) = 0;
 		touched = 1;
diff --git a/arch/sparc/kernel/pci_common.c b/arch/sparc/kernel/pci_common.c
index b775658a927..8a000583b5c 100644
--- a/arch/sparc/kernel/pci_common.c
+++ b/arch/sparc/kernel/pci_common.c
@@ -371,14 +371,19 @@ static void pci_register_iommu_region(struct pci_pbm_info *pbm)
 		struct resource *rp = kzalloc(sizeof(*rp), GFP_KERNEL);
 
 		if (!rp) {
-			prom_printf("Cannot allocate IOMMU resource.\n");
-			prom_halt();
+			pr_info("%s: Cannot allocate IOMMU resource.\n",
+				pbm->name);
+			return;
 		}
 		rp->name = "IOMMU";
 		rp->start = pbm->mem_space.start + (unsigned long) vdma[0];
 		rp->end = rp->start + (unsigned long) vdma[1] - 1UL;
 		rp->flags = IORESOURCE_BUSY;
-		request_resource(&pbm->mem_space, rp);
+		if (request_resource(&pbm->mem_space, rp)) {
+			pr_info("%s: Unable to request IOMMU resource.\n",
+				pbm->name);
+			kfree(rp);
+		}
 	}
 }
 
diff --git a/arch/sparc/kernel/pcr.c b/arch/sparc/kernel/pcr.c
index 2d94e7a03af..c4a6a50b484 100644
--- a/arch/sparc/kernel/pcr.c
+++ b/arch/sparc/kernel/pcr.c
@@ -8,6 +8,7 @@
 #include <linux/irq.h>
 
 #include <linux/perf_event.h>
+#include <linux/ftrace.h>
 
 #include <asm/pil.h>
 #include <asm/pcr.h>
@@ -34,7 +35,7 @@ unsigned int picl_shift;
  * Therefore in such situations we defer the work by signalling
  * a lower level cpu IRQ.
  */
-void deferred_pcr_work_irq(int irq, struct pt_regs *regs)
+void __irq_entry deferred_pcr_work_irq(int irq, struct pt_regs *regs)
 {
 	struct pt_regs *old_regs;
 
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
index 4c533452810..b6a2b8f4704 100644
--- a/arch/sparc/kernel/smp_64.c
+++ b/arch/sparc/kernel/smp_64.c
@@ -22,6 +22,7 @@
 #include <linux/profile.h>
 #include <linux/bootmem.h>
 #include <linux/vmalloc.h>
+#include <linux/ftrace.h>
 #include <linux/cpu.h>
 #include <linux/slab.h>
 
@@ -823,13 +824,13 @@ void arch_send_call_function_single_ipi(int cpu)
 			      &cpumask_of_cpu(cpu));
 }
 
-void smp_call_function_client(int irq, struct pt_regs *regs)
+void __irq_entry smp_call_function_client(int irq, struct pt_regs *regs)
 {
 	clear_softint(1 << irq);
 	generic_smp_call_function_interrupt();
 }
 
-void smp_call_function_single_client(int irq, struct pt_regs *regs)
+void __irq_entry smp_call_function_single_client(int irq, struct pt_regs *regs)
 {
 	clear_softint(1 << irq);
 	generic_smp_call_function_single_interrupt();
@@ -965,7 +966,7 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
 	put_cpu();
 }
 
-void smp_new_mmu_context_version_client(int irq, struct pt_regs *regs)
+void __irq_entry smp_new_mmu_context_version_client(int irq, struct pt_regs *regs)
 {
 	struct mm_struct *mm;
 	unsigned long flags;
@@ -1149,7 +1150,7 @@ void smp_release(void)
  */
 extern void prom_world(int);
 
-void smp_penguin_jailcell(int irq, struct pt_regs *regs)
+void __irq_entry smp_penguin_jailcell(int irq, struct pt_regs *regs)
 {
 	clear_softint(1 << irq);
 
@@ -1365,7 +1366,7 @@ void smp_send_reschedule(int cpu)
 		      &cpumask_of_cpu(cpu));
 }
 
-void smp_receive_signal_client(int irq, struct pt_regs *regs)
+void __irq_entry smp_receive_signal_client(int irq, struct pt_regs *regs)
 {
 	clear_softint(1 << irq);
 }
diff --git a/arch/sparc/kernel/time_64.c b/arch/sparc/kernel/time_64.c
index 67e16510288..c7bbe6cf7b8 100644
--- a/arch/sparc/kernel/time_64.c
+++ b/arch/sparc/kernel/time_64.c
@@ -35,6 +35,7 @@
 #include <linux/clocksource.h>
 #include <linux/of_device.h>
 #include <linux/platform_device.h>
+#include <linux/ftrace.h>
 
 #include <asm/oplib.h>
 #include <asm/timer.h>
@@ -717,7 +718,7 @@ static struct clock_event_device sparc64_clockevent = {
 };
 static DEFINE_PER_CPU(struct clock_event_device, sparc64_events);
 
-void timer_interrupt(int irq, struct pt_regs *regs)
+void __irq_entry timer_interrupt(int irq, struct pt_regs *regs)
 {
 	struct pt_regs *old_regs = set_irq_regs(regs);
 	unsigned long tick_mask = tick_ops->softint_mask;
@@ -728,6 +729,7 @@ void timer_interrupt(int irq, struct pt_regs *regs)
 
 	irq_enter();
 
+	local_cpu_data().irq0_irqs++;
 	kstat_incr_irqs_this_cpu(0, irq_to_desc(0));
 
 	if (unlikely(!evt->event_handler)) {
diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
index 837dfc2390d..9da57f03298 100644
--- a/arch/sparc/kernel/traps_64.c
+++ b/arch/sparc/kernel/traps_64.c
@@ -2203,27 +2203,6 @@ void dump_stack(void)
 
 EXPORT_SYMBOL(dump_stack);
 
-static inline int is_kernel_stack(struct task_struct *task,
-				  struct reg_window *rw)
-{
-	unsigned long rw_addr = (unsigned long) rw;
-	unsigned long thread_base, thread_end;
-
-	if (rw_addr < PAGE_OFFSET) {
-		if (task != &init_task)
-			return 0;
-	}
-
-	thread_base = (unsigned long) task_stack_page(task);
-	thread_end = thread_base + sizeof(union thread_union);
-	if (rw_addr >= thread_base &&
-	    rw_addr < thread_end &&
-	    !(rw_addr & 0x7UL))
-		return 1;
-
-	return 0;
-}
-
 static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
 {
 	unsigned long fp = rw->ins[6];
@@ -2252,6 +2231,7 @@ void die_if_kernel(char *str, struct pt_regs *regs)
 	show_regs(regs);
 	add_taint(TAINT_DIE);
 	if (regs->tstate & TSTATE_PRIV) {
+		struct thread_info *tp = current_thread_info();
 		struct reg_window *rw = (struct reg_window *)
 			(regs->u_regs[UREG_FP] + STACK_BIAS);
 
@@ -2259,8 +2239,8 @@ void die_if_kernel(char *str, struct pt_regs *regs)
 		 * find some badly aligned kernel stack.
 		 */
 		while (rw &&
-		       count++ < 30&&
-		       is_kernel_stack(current, rw)) {
+		       count++ < 30 &&
+		       kstack_valid(tp, (unsigned long) rw)) {
 			printk("Caller[%016lx]: %pS\n", rw->ins[7],
 			       (void *) rw->ins[7]);
 
diff --git a/arch/sparc/kernel/vmlinux.lds.S b/arch/sparc/kernel/vmlinux.lds.S
index 4e599259396..0c1e6783657 100644
--- a/arch/sparc/kernel/vmlinux.lds.S
+++ b/arch/sparc/kernel/vmlinux.lds.S
@@ -46,11 +46,16 @@ SECTIONS
 		SCHED_TEXT
 		LOCK_TEXT
 		KPROBES_TEXT
+		IRQENTRY_TEXT
 		*(.gnu.warning)
 	} = 0
 	_etext = .;
 
 	RO_DATA(PAGE_SIZE)
+
+	/* Start of data section */
+	_sdata = .;
+
 	.data1 : {
 		*(.data1)
 	}
diff --git a/arch/sparc/lib/mcount.S b/arch/sparc/lib/mcount.S
index 24b8b12deed..3753e3c6e17 100644
--- a/arch/sparc/lib/mcount.S
+++ b/arch/sparc/lib/mcount.S
@@ -7,26 +7,11 @@
 
 #include <linux/linkage.h>
 
-#include <asm/ptrace.h>
-#include <asm/thread_info.h>
-
 /*
  * This is the main variant and is called by C code.  GCC's -pg option
  * automatically instruments every C function with a call to this.
  */
 
-#ifdef CONFIG_STACK_DEBUG
-
-#define OVSTACKSIZE	4096		/* lets hope this is enough */
-
-	.data
-	.align		8
-panicstring:
-	.asciz		"Stack overflow\n"
-	.align		8
-ovstack:
-	.skip		OVSTACKSIZE
-#endif
 	.text
 	.align		32
 	.globl		_mcount
@@ -35,84 +20,48 @@ ovstack:
 	.type		mcount,#function
 _mcount:
 mcount:
-#ifdef CONFIG_STACK_DEBUG
-	/*
-	 * Check whether %sp is dangerously low.
-	 */
-	ldub		[%g6 + TI_FPDEPTH], %g1
-	srl		%g1, 1, %g3
-	add		%g3, 1, %g3
-	sllx		%g3, 8, %g3	! each fpregs frame is 256b
-	add		%g3, 192, %g3
-	add		%g6, %g3, %g3	! where does task_struct+frame end?
-	sub		%g3, STACK_BIAS, %g3
-	cmp		%sp, %g3
-	bg,pt		%xcc, 1f
-	 nop
-	lduh		[%g6 + TI_CPU], %g1
-	sethi		%hi(hardirq_stack), %g3
-	or		%g3, %lo(hardirq_stack), %g3
-	sllx		%g1, 3, %g1
-	ldx		[%g3 + %g1], %g7
-	sub		%g7, STACK_BIAS, %g7
-	cmp		%sp, %g7
-	bleu,pt		%xcc, 2f
-	 sethi		%hi(THREAD_SIZE), %g3
-	add		%g7, %g3, %g7
-	cmp		%sp, %g7
-	blu,pn		%xcc, 1f
-2:	 sethi		%hi(softirq_stack), %g3
-	or		%g3, %lo(softirq_stack), %g3
-	ldx		[%g3 + %g1], %g7
-	sub		%g7, STACK_BIAS, %g7
-	cmp		%sp, %g7
-	bleu,pt		%xcc, 3f
-	 sethi		%hi(THREAD_SIZE), %g3
-	add		%g7, %g3, %g7
-	cmp		%sp, %g7
-	blu,pn		%xcc, 1f
-	 nop
-	/* If we are already on ovstack, don't hop onto it
-	 * again, we are already trying to output the stack overflow
-	 * message.
-	 */
-3:	sethi		%hi(ovstack), %g7	! cant move to panic stack fast enough
-	 or		%g7, %lo(ovstack), %g7
-	add		%g7, OVSTACKSIZE, %g3
-	sub		%g3, STACK_BIAS + 192, %g3
-	sub		%g7, STACK_BIAS, %g7
-	cmp		%sp, %g7
-	blu,pn		%xcc, 2f
-	 cmp		%sp, %g3
-	bleu,pn		%xcc, 1f
-	 nop
-2:	mov		%g3, %sp
-	sethi		%hi(panicstring), %g3
-	call		prom_printf
-	 or		%g3, %lo(panicstring), %o0
-	call		prom_halt
-	 nop
-1:
-#endif
 #ifdef CONFIG_FUNCTION_TRACER
 #ifdef CONFIG_DYNAMIC_FTRACE
-	mov		%o7, %o0
-	.globl		mcount_call
-mcount_call:
-	call		ftrace_stub
-	 mov		%o0, %o7
+	/* Do nothing, the retl/nop below is all we need.  */
 #else
-	sethi		%hi(ftrace_trace_function), %g1
+	sethi		%hi(function_trace_stop), %g1
+	lduw		[%g1 + %lo(function_trace_stop)], %g2
+	brnz,pn		%g2, 2f
+	 sethi		%hi(ftrace_trace_function), %g1
 	sethi		%hi(ftrace_stub), %g2
 	ldx		[%g1 + %lo(ftrace_trace_function)], %g1
 	or		%g2, %lo(ftrace_stub), %g2
 	cmp		%g1, %g2
 	be,pn		%icc, 1f
-	 mov		%i7, %o1
-	jmpl		%g1, %g0
-	 mov		%o7, %o0
+	 mov		%i7, %g3
+	save		%sp, -128, %sp
+	mov		%g3, %o1
+	jmpl		%g1, %o7
+	 mov		%i7, %o0
+	ret
+	 restore
+	/* not reached */
 1:
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	sethi		%hi(ftrace_graph_return), %g1
+	ldx		[%g1 + %lo(ftrace_graph_return)], %g3
+	cmp		%g2, %g3
+	bne,pn		%xcc, 5f
+	 sethi		%hi(ftrace_graph_entry_stub), %g2
+	sethi		%hi(ftrace_graph_entry), %g1
+	or		%g2, %lo(ftrace_graph_entry_stub), %g2
+	ldx		[%g1 + %lo(ftrace_graph_entry)], %g1
+	cmp		%g1, %g2
+	be,pt		%xcc, 2f
+	 nop
+5:	mov		%i7, %g2
+	mov		%fp, %g3
+	save		%sp, -128, %sp
+	mov		%g2, %l0
+	ba,pt		%xcc, ftrace_graph_caller
+	 mov		%g3, %l1
+#endif
+2:
 #endif
 #endif
 	retl
@@ -131,14 +80,50 @@ ftrace_stub:
 	.globl		ftrace_caller
 	.type		ftrace_caller,#function
 ftrace_caller:
-	mov		%i7, %o1
-	mov		%o7, %o0
+	sethi		%hi(function_trace_stop), %g1
+	mov		%i7, %g2
+	lduw		[%g1 + %lo(function_trace_stop)], %g1
+	brnz,pn		%g1, ftrace_stub
+	 mov		%fp, %g3
+	save		%sp, -128, %sp
+	mov		%g2, %o1
+	mov		%g2, %l0
+	mov		%g3, %l1
 	.globl		ftrace_call
 ftrace_call:
 	call		ftrace_stub
-	 mov		%o0, %o7
-	retl
+	 mov		%i7, %o0
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	.globl		ftrace_graph_call
+ftrace_graph_call:
+	call		ftrace_stub
 	 nop
+#endif
+	ret
+	 restore
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	.size		ftrace_graph_call,.-ftrace_graph_call
+#endif
+	.size		ftrace_call,.-ftrace_call
 	.size		ftrace_caller,.-ftrace_caller
 #endif
 #endif
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ENTRY(ftrace_graph_caller)
+	mov		%l0, %o0
+	mov		%i7, %o1
+	call		prepare_ftrace_return
+	 mov		%l1, %o2
+	ret
+	 restore	%o0, -8, %i7
+END(ftrace_graph_caller)
+
+ENTRY(return_to_handler)
+	save		%sp, -128, %sp
+	call		ftrace_return_to_handler
+	 mov		%fp, %o0
+	jmpl		%o0 + 8, %g0
+	 restore
+END(return_to_handler)
+#endif
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index ff017108700..935248bdbc4 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -356,7 +356,7 @@ config SLUB_STATS
 config DEBUG_KMEMLEAK
 	bool "Kernel memory leak detector"
 	depends on DEBUG_KERNEL && EXPERIMENTAL && !MEMORY_HOTPLUG && \
-		(X86 || ARM || PPC || S390 || SUPERH || MICROBLAZE)
+		(X86 || ARM || PPC || S390 || SPARC64 || SUPERH || MICROBLAZE)
 
 	select DEBUG_FS if SYSFS
 	select STACKTRACE if STACKTRACE_SUPPORT