Diffstat (limited to 'include/linux/ftrace.h')
 include/linux/ftrace.h | 427 ++++++++++++++++++++++++++++++++++++++++--------
 1 file changed, 376 insertions(+), 51 deletions(-)
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index dcd6a7c3a43..404a686a364 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -10,7 +10,7 @@
#include <linux/kallsyms.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
-#include <linux/module.h>
+#include <linux/ptrace.h>
#include <linux/ktime.h>
#include <linux/sched.h>
#include <linux/types.h>
@@ -19,6 +19,31 @@
#include <asm/ftrace.h>
+/*
+ * If the arch supports passing the variable contents of
+ * function_trace_op as the third parameter back from the
+ * mcount call, then the arch should define this as 1.
+ */
+#ifndef ARCH_SUPPORTS_FTRACE_OPS
+#define ARCH_SUPPORTS_FTRACE_OPS 0
+#endif
+
+/*
+ * If the arch's mcount caller does not support all of ftrace's
+ * features, then it must call an indirect function that does,
+ * or at least does enough to prevent any unwelcome side effects.
+ */
+#if !defined(CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST) || \
+ !ARCH_SUPPORTS_FTRACE_OPS
+# define FTRACE_FORCE_LIST_FUNC 1
+#else
+# define FTRACE_FORCE_LIST_FUNC 0
+#endif
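
For illustration, an arch that passes function_trace_op and the pt_regs
pointer through from its mcount trampoline would opt in from its own
asm/ftrace.h before this header is parsed; a minimal sketch (placement in
asm/ftrace.h is the usual convention, not something this header enforces):

	#define ARCH_SUPPORTS_FTRACE_OPS 1

With that set (and CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST available),
FTRACE_FORCE_LIST_FUNC evaluates to 0 and a lone registered ops can be
called directly instead of going through the list function.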
+
+
+struct module;
+struct ftrace_hash;
+
#ifdef CONFIG_FUNCTION_TRACER
extern int ftrace_enabled;
@@ -27,11 +52,79 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp,
loff_t *ppos);
-typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip);
+struct ftrace_ops;
+
+typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip,
+ struct ftrace_ops *op, struct pt_regs *regs);
+
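A callback matching the new prototype looks like the sketch below
(my_trace_func is a hypothetical name; regs may be NULL unless the ops
asked for, and the arch can supply, saved registers):

	static void my_trace_func(unsigned long ip, unsigned long parent_ip,
				  struct ftrace_ops *op, struct pt_regs *regs)
	{
		/* ip is the traced function, parent_ip is its call site */
	}
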
+/*
+ * FTRACE_OPS_FL_* bits denote the state of ftrace_ops struct and are
+ * set in the flags member.
+ *
+ * ENABLED - set/unset when ftrace_ops is registered/unregistered
+ * DYNAMIC - set when ftrace_ops is registered to denote dynamically
+ * allocated ftrace_ops which need special care
+ * CONTROL - set manually by the ftrace_ops user to denote that the ftrace_ops
+ *           can be controlled by the following calls:
+ * ftrace_function_local_enable
+ * ftrace_function_local_disable
+ * SAVE_REGS - The ftrace_ops wants regs saved at each function called
+ * and passed to the callback. If this flag is set, but the
+ * architecture does not support passing regs
+ * (CONFIG_DYNAMIC_FTRACE_WITH_REGS is not defined), then the
+ * ftrace_ops will fail to register, unless the next flag
+ * is set.
+ * SAVE_REGS_IF_SUPPORTED - This is the same as SAVE_REGS, but if the
+ * handler can handle an arch that does not save regs
+ * (the handler tests if regs == NULL), then it can set
+ * this flag instead. It will not fail registering the ftrace_ops
+ * but, the regs field will be NULL if the arch does not support
+ * passing regs to the handler.
+ * Note, if this flag is set, the SAVE_REGS flag will automatically
+ * get set upon registering the ftrace_ops, if the arch supports it.
+ * RECURSION_SAFE - The ftrace_ops can set this to tell the ftrace infrastructure
+ *            that the callback has its own recursion protection. If it does
+ *            not set this, then the ftrace infrastructure will add recursion
+ *            protection for the caller.
+ * STUB   - The ftrace_ops is just a placeholder.
+ * INITIALIZED - The ftrace_ops has already been initialized (the first time
+ *            register_ftrace_function() is called, it will initialize the ops)
+ * DELETED - The ops are being deleted, do not let them be registered again.
+ */
+enum {
+ FTRACE_OPS_FL_ENABLED = 1 << 0,
+ FTRACE_OPS_FL_DYNAMIC = 1 << 1,
+ FTRACE_OPS_FL_CONTROL = 1 << 2,
+ FTRACE_OPS_FL_SAVE_REGS = 1 << 3,
+ FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED = 1 << 4,
+ FTRACE_OPS_FL_RECURSION_SAFE = 1 << 5,
+ FTRACE_OPS_FL_STUB = 1 << 6,
+ FTRACE_OPS_FL_INITIALIZED = 1 << 7,
+ FTRACE_OPS_FL_DELETED = 1 << 8,
+};
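
As a hedged example of how these flags combine, an ops that wants registers
when the arch can provide them and that does its own recursion guarding could
be declared like this (my_trace_func is the hypothetical callback sketched
above):

	static struct ftrace_ops my_ops = {
		.func	= my_trace_func,
		.flags	= FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED |
			  FTRACE_OPS_FL_RECURSION_SAFE,
	};

If the arch lacks CONFIG_DYNAMIC_FTRACE_WITH_REGS, registration still
succeeds and the callback simply sees regs == NULL.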
+/*
+ * Note, ftrace_ops can be referenced outside of RCU protection.
+ * (Although, for perf, the control ops prevent that). If ftrace_ops is
+ * allocated and not part of kernel core data, unregistering it will
+ * schedule a pass on all CPUs to make sure that there are no remaining
+ * users. Depending on the load of the system that may take a bit of time.
+ *
+ * Any private data added must also take care not to be freed too early.
+ * If private data is added to a ftrace_ops that is in core code, the
+ * user of the ftrace_ops must perform a schedule_on_each_cpu() before
+ * freeing it.
+ */
struct ftrace_ops {
- ftrace_func_t func;
- struct ftrace_ops *next;
+ ftrace_func_t func;
+ struct ftrace_ops *next;
+ unsigned long flags;
+ int __percpu *disabled;
+ void *private;
+#ifdef CONFIG_DYNAMIC_FTRACE
+ struct ftrace_hash *notrace_hash;
+ struct ftrace_hash *filter_hash;
+ struct mutex regex_lock;
+#endif
};
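
The lifetime rules above matter most for dynamically allocated ops; a
minimal sketch (sketch_* names are hypothetical, and the core marks the ops
FTRACE_OPS_FL_DYNAMIC itself when it is not core kernel data):

	static struct ftrace_ops *sketch_start_tracing(void)
	{
		struct ftrace_ops *ops = kzalloc(sizeof(*ops), GFP_KERNEL);

		if (!ops)
			return NULL;
		ops->func = my_trace_func;	/* hypothetical callback */
		if (register_ftrace_function(ops)) {
			kfree(ops);
			return NULL;
		}
		return ops;
	}

	static void sketch_stop_tracing(struct ftrace_ops *ops)
	{
		unregister_ftrace_function(ops);	/* synchronizes all CPUs */
		kfree(ops);				/* safe only after that returns */
	}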
extern int function_trace_stop;
@@ -84,7 +177,57 @@ int register_ftrace_function(struct ftrace_ops *ops);
int unregister_ftrace_function(struct ftrace_ops *ops);
void clear_ftrace_function(void);
-extern void ftrace_stub(unsigned long a0, unsigned long a1);
+/**
+ * ftrace_function_local_enable - enable controlled ftrace_ops on current cpu
+ *
+ * This function enables tracing on the current cpu by decreasing
+ * the per cpu control variable.
+ * It must be called with preemption disabled and only on ftrace_ops
+ * registered with FTRACE_OPS_FL_CONTROL. If called without preemption
+ * disabled, this_cpu_ptr will complain when CONFIG_DEBUG_PREEMPT is enabled.
+ */
+static inline void ftrace_function_local_enable(struct ftrace_ops *ops)
+{
+ if (WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_CONTROL)))
+ return;
+
+ (*this_cpu_ptr(ops->disabled))--;
+}
+
+/**
+ * ftrace_function_local_disable - disable controlled ftrace_ops on current cpu
+ *
+ * This function disables tracing on the current cpu by increasing
+ * the per cpu control variable.
+ * It must be called with preemption disabled and only on ftrace_ops
+ * registered with FTRACE_OPS_FL_CONTROL. If called without preemption
+ * disabled, this_cpu_ptr will complain when CONFIG_DEBUG_PREEMPT is enabled.
+ */
+static inline void ftrace_function_local_disable(struct ftrace_ops *ops)
+{
+ if (WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_CONTROL)))
+ return;
+
+ (*this_cpu_ptr(ops->disabled))++;
+}
+
+/**
+ * ftrace_function_local_disabled - returns ftrace_ops disabled value
+ * on current cpu
+ *
+ * This function returns the value of ftrace_ops::disabled on the current cpu.
+ * It must be called with preemption disabled and only on ftrace_ops
+ * registered with FTRACE_OPS_FL_CONTROL. If called without preemption
+ * disabled, this_cpu_ptr will complain when CONFIG_DEBUG_PREEMPT is enabled.
+ */
+static inline int ftrace_function_local_disabled(struct ftrace_ops *ops)
+{
+ WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_CONTROL));
+ return *this_cpu_ptr(ops->disabled);
+}
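
Taken together, a hedged usage sketch of the three helpers (the caller owns
a CONTROL ops whose ->disabled counter was percpu-allocated at setup time;
sketch_toggle_local is an invented name):

	static void sketch_toggle_local(struct ftrace_ops *ops)
	{
		preempt_disable();
		if (ftrace_function_local_disabled(ops))
			ftrace_function_local_enable(ops);
		else
			ftrace_function_local_disable(ops);
		preempt_enable();
	}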
+
+extern void ftrace_stub(unsigned long a0, unsigned long a1,
+ struct ftrace_ops *op, struct pt_regs *regs);
#else /* !CONFIG_FUNCTION_TRACER */
/*
@@ -93,6 +236,10 @@ extern void ftrace_stub(unsigned long a0, unsigned long a1);
*/
#define register_ftrace_function(ops) ({ 0; })
#define unregister_ftrace_function(ops) ({ 0; })
+static inline int ftrace_nr_registered_ops(void)
+{
+ return 0;
+}
static inline void clear_ftrace_function(void) { }
static inline void ftrace_kill(void) { }
static inline void ftrace_stop(void) { }
@@ -110,7 +257,8 @@ stack_trace_sysctl(struct ctl_table *table, int write,
struct ftrace_func_command {
struct list_head list;
char *name;
- int (*func)(char *func, char *cmd,
+ int (*func)(struct ftrace_hash *hash,
+ char *func, char *cmd,
char *params, int enable);
};
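
A command implementation now receives the hash of the filter file being
operated on; a hypothetical sketch (sketch_* names are invented):

	static int sketch_cmd_func(struct ftrace_hash *hash, char *func,
				   char *cmd, char *params, int enable)
	{
		/* act on 'hash' for every function matching 'func' */
		return 0;
	}

	static struct ftrace_func_command sketch_cmd = {
		.name	= "sketch",
		.func	= sketch_cmd_func,
	};

After register_ftrace_command(&sketch_cmd), writing something like
'vfs_read:sketch' to set_ftrace_filter would invoke the callback.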
@@ -119,14 +267,18 @@ struct ftrace_func_command {
int ftrace_arch_code_modify_prepare(void);
int ftrace_arch_code_modify_post_process(void);
+void ftrace_bug(int err, unsigned long ip);
+
struct seq_file;
struct ftrace_probe_ops {
void (*func)(unsigned long ip,
unsigned long parent_ip,
void **data);
- int (*callback)(unsigned long ip, void **data);
- void (*free)(void **data);
+ int (*init)(struct ftrace_probe_ops *ops,
+ unsigned long ip, void **data);
+ void (*free)(struct ftrace_probe_ops *ops,
+ unsigned long ip, void **data);
int (*print)(struct seq_file *m,
unsigned long ip,
struct ftrace_probe_ops *ops,
@@ -143,46 +295,150 @@ extern void
unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops);
extern void unregister_ftrace_function_probe_all(char *glob);
-extern int ftrace_text_reserved(void *start, void *end);
+extern int ftrace_text_reserved(const void *start, const void *end);
+extern int ftrace_nr_registered_ops(void);
+
+/*
+ * The dyn_ftrace record's flags field is split into two parts.
+ * The first part, which holds the values 0 through FTRACE_REF_MAX, is a
+ * counter of the number of callbacks that have registered the function
+ * that the dyn_ftrace descriptor represents.
+ *
+ * The second part is a mask:
+ * ENABLED - the function is being traced
+ * REGS - the record wants the function to save regs
+ * REGS_EN - the function is set up to save regs.
+ *
+ * When a new ftrace_ops is registered and wants a function to save
+ * pt_regs, the rec->flag REGS is set. When the function has been
+ * set up to save regs, the REGS_EN flag is set. Once a function
+ * starts saving regs it will do so until all ftrace_ops are removed
+ * from tracing that function.
+ */
enum {
- FTRACE_FL_FREE = (1 << 0),
- FTRACE_FL_FAILED = (1 << 1),
- FTRACE_FL_FILTER = (1 << 2),
- FTRACE_FL_ENABLED = (1 << 3),
- FTRACE_FL_NOTRACE = (1 << 4),
- FTRACE_FL_CONVERTED = (1 << 5),
+ FTRACE_FL_ENABLED = (1UL << 29),
+ FTRACE_FL_REGS = (1UL << 30),
+ FTRACE_FL_REGS_EN = (1UL << 31)
};
+#define FTRACE_FL_MASK (0x7UL << 29)
+#define FTRACE_REF_MAX ((1UL << 29) - 1)
+
struct dyn_ftrace {
- union {
- unsigned long ip; /* address of mcount call-site */
- struct dyn_ftrace *freelist;
- };
- union {
- unsigned long flags;
- struct dyn_ftrace *newlist;
- };
- struct dyn_arch_ftrace arch;
+ unsigned long ip; /* address of mcount call-site */
+ unsigned long flags;
+ struct dyn_arch_ftrace arch;
};
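
Decoding the split flags field then looks like this hedged sketch
(sketch_rec_busy is a hypothetical helper):

	static bool sketch_rec_busy(struct dyn_ftrace *rec)
	{
		unsigned long ref = rec->flags & FTRACE_REF_MAX; /* callback count */

		return ref || (rec->flags & FTRACE_FL_ENABLED);
	}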
int ftrace_force_update(void);
-void ftrace_set_filter(unsigned char *buf, int len, int reset);
+int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip,
+ int remove, int reset);
+int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
+ int len, int reset);
+int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
+ int len, int reset);
+void ftrace_set_global_filter(unsigned char *buf, int len, int reset);
+void ftrace_set_global_notrace(unsigned char *buf, int len, int reset);
+void ftrace_free_filter(struct ftrace_ops *ops);
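
A hedged sketch of the per-ops filter API (the patterns and the sketch_
helper are invented; reset=1 discards any previously installed patterns):

	static int sketch_setup_filters(struct ftrace_ops *ops)
	{
		unsigned char pat[] = "vfs_*";
		unsigned char skip[] = "vfs_write";
		int ret;

		ret = ftrace_set_filter(ops, pat, sizeof(pat) - 1, 1);
		if (ret)
			return ret;
		return ftrace_set_notrace(ops, skip, sizeof(skip) - 1, 1);
	}

Called before register_ftrace_function(), this traces every vfs_* function
except vfs_write.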
int register_ftrace_command(struct ftrace_func_command *cmd);
int unregister_ftrace_command(struct ftrace_func_command *cmd);
+enum {
+ FTRACE_UPDATE_CALLS = (1 << 0),
+ FTRACE_DISABLE_CALLS = (1 << 1),
+ FTRACE_UPDATE_TRACE_FUNC = (1 << 2),
+ FTRACE_START_FUNC_RET = (1 << 3),
+ FTRACE_STOP_FUNC_RET = (1 << 4),
+};
+
+/*
+ * The FTRACE_UPDATE_* enum is used to pass information back
+ * from the ftrace_update_record() and ftrace_test_record()
+ * functions. These are called by the code update routines
+ * to find out what is to be done for a given function.
+ *
+ * IGNORE - The function is already what we want it to be
+ * MAKE_CALL - Start tracing the function
+ * MODIFY_CALL - Change the call target (e.g. switch between the
+ *               regs-saving and normal callers)
+ * MAKE_NOP - Stop tracing the function
+ */
+enum {
+ FTRACE_UPDATE_IGNORE,
+ FTRACE_UPDATE_MAKE_CALL,
+ FTRACE_UPDATE_MODIFY_CALL,
+ FTRACE_UPDATE_MAKE_NOP,
+};
+
+enum {
+ FTRACE_ITER_FILTER = (1 << 0),
+ FTRACE_ITER_NOTRACE = (1 << 1),
+ FTRACE_ITER_PRINTALL = (1 << 2),
+ FTRACE_ITER_DO_HASH = (1 << 3),
+ FTRACE_ITER_HASH = (1 << 4),
+ FTRACE_ITER_ENABLED = (1 << 5),
+};
+
+void arch_ftrace_update_code(int command);
+
+struct ftrace_rec_iter;
+
+struct ftrace_rec_iter *ftrace_rec_iter_start(void);
+struct ftrace_rec_iter *ftrace_rec_iter_next(struct ftrace_rec_iter *iter);
+struct dyn_ftrace *ftrace_rec_iter_record(struct ftrace_rec_iter *iter);
+
+#define for_ftrace_rec_iter(iter) \
+ for (iter = ftrace_rec_iter_start(); \
+ iter; \
+ iter = ftrace_rec_iter_next(iter))
+
+
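The FTRACE_UPDATE_* values above and this iterator combine in an arch's code
update pass roughly like the following sketch (sketch_update_all is
hypothetical; the actual instruction patching is arch-specific and elided):

	static void sketch_update_all(int enable)
	{
		struct ftrace_rec_iter *iter;
		struct dyn_ftrace *rec;

		for_ftrace_rec_iter(iter) {
			rec = ftrace_rec_iter_record(iter);
			switch (ftrace_test_record(rec, enable)) {
			case FTRACE_UPDATE_IGNORE:
				break;	/* already in the desired state */
			case FTRACE_UPDATE_MAKE_CALL:
				/* patch in a call to ftrace_get_addr_new(rec) */
				break;
			case FTRACE_UPDATE_MODIFY_CALL:
				/* retarget ftrace_get_addr_curr(rec) to _new(rec) */
				break;
			case FTRACE_UPDATE_MAKE_NOP:
				/* patch the call site back to a nop */
				break;
			}
		}
	}
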
+int ftrace_update_record(struct dyn_ftrace *rec, int enable);
+int ftrace_test_record(struct dyn_ftrace *rec, int enable);
+void ftrace_run_stop_machine(int command);
+unsigned long ftrace_location(unsigned long ip);
+unsigned long ftrace_get_addr_new(struct dyn_ftrace *rec);
+unsigned long ftrace_get_addr_curr(struct dyn_ftrace *rec);
+
+extern ftrace_func_t ftrace_trace_function;
+
+int ftrace_regex_open(struct ftrace_ops *ops, int flag,
+ struct inode *inode, struct file *file);
+ssize_t ftrace_filter_write(struct file *file, const char __user *ubuf,
+ size_t cnt, loff_t *ppos);
+ssize_t ftrace_notrace_write(struct file *file, const char __user *ubuf,
+ size_t cnt, loff_t *ppos);
+int ftrace_regex_release(struct inode *inode, struct file *file);
+
+void __init
+ftrace_set_early_filter(struct ftrace_ops *ops, char *buf, int enable);
+
/* defined in arch */
extern int ftrace_ip_converted(unsigned long ip);
-extern int ftrace_dyn_arch_init(void *data);
+extern int ftrace_dyn_arch_init(void);
+extern void ftrace_replace_code(int enable);
extern int ftrace_update_ftrace_func(ftrace_func_t func);
extern void ftrace_caller(void);
+extern void ftrace_regs_caller(void);
extern void ftrace_call(void);
+extern void ftrace_regs_call(void);
extern void mcount_call(void);
+void ftrace_modify_all_code(int command);
+
#ifndef FTRACE_ADDR
#define FTRACE_ADDR ((unsigned long)ftrace_caller)
#endif
+
+#ifndef FTRACE_REGS_ADDR
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
+# define FTRACE_REGS_ADDR ((unsigned long)ftrace_regs_caller)
+#else
+# define FTRACE_REGS_ADDR FTRACE_ADDR
+#endif
+#endif
+
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
extern void ftrace_graph_caller(void);
extern int ftrace_enable_ftrace_graph_caller(void);
@@ -238,34 +494,89 @@ extern int ftrace_make_nop(struct module *mod,
*/
extern int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr);
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
+/**
+ * ftrace_modify_call - convert from one addr to another (no nop)
+ * @rec: the mcount call site record
+ * @old_addr: the address the call site is expected to currently call
+ * @addr: the address to change to
+ *
+ * This is a very sensitive operation and great care needs
+ * to be taken by the arch. The operation should carefully
+ * read the location, check to see if what is read is indeed
+ * what we expect it to be, and then on success of the compare,
+ * it should write to the location.
+ *
+ * The code segment at @rec->ip should be a call to @old_addr
+ *
+ * Return must be:
+ * 0 on success
+ * -EFAULT on error reading the location
+ * -EINVAL on a failed compare of the contents
+ * -EPERM on error writing to the location
+ * Any other value will be considered a failure.
+ */
+extern int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
+ unsigned long addr);
+#else
+/* Should never be called */
+static inline int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
+ unsigned long addr)
+{
+ return -EINVAL;
+}
+#endif
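
The read/compare/write discipline described in the kerneldoc maps onto arch
code roughly as below. This is a sketch only: arch_make_call() and
arch_text_poke() are invented placeholders for the arch's instruction
encoder and patching primitive, and MCOUNT_INSN_SIZE is the usual
arch-provided constant.

	static int sketch_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
				      unsigned long addr)
	{
		unsigned char old[MCOUNT_INSN_SIZE], new[MCOUNT_INSN_SIZE];
		unsigned char cur[MCOUNT_INSN_SIZE];

		arch_make_call(old, rec->ip, old_addr);	/* expected contents */
		arch_make_call(new, rec->ip, addr);	/* replacement call */

		if (probe_kernel_read(cur, (void *)rec->ip, MCOUNT_INSN_SIZE))
			return -EFAULT;	/* error reading the location */
		if (memcmp(cur, old, MCOUNT_INSN_SIZE))
			return -EINVAL;	/* failed compare of the contents */
		if (arch_text_poke((void *)rec->ip, new, MCOUNT_INSN_SIZE))
			return -EPERM;	/* error writing to the location */
		return 0;
	}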
+
/* May be defined in arch */
extern int ftrace_arch_read_dyn_info(char *buf, int size);
extern int skip_trace(unsigned long ip);
+extern void ftrace_module_init(struct module *mod);
extern void ftrace_disable_daemon(void);
extern void ftrace_enable_daemon(void);
-#else
+#else /* CONFIG_DYNAMIC_FTRACE */
static inline int skip_trace(unsigned long ip) { return 0; }
static inline int ftrace_force_update(void) { return 0; }
-static inline void ftrace_set_filter(unsigned char *buf, int len, int reset)
-{
-}
static inline void ftrace_disable_daemon(void) { }
static inline void ftrace_enable_daemon(void) { }
static inline void ftrace_release_mod(struct module *mod) {}
-static inline int register_ftrace_command(struct ftrace_func_command *cmd)
+static inline void ftrace_module_init(struct module *mod) {}
+static inline __init int register_ftrace_command(struct ftrace_func_command *cmd)
{
return -EINVAL;
}
-static inline int unregister_ftrace_command(char *cmd_name)
+static inline __init int unregister_ftrace_command(struct ftrace_func_command *cmd)
{
return -EINVAL;
}
-static inline int ftrace_text_reserved(void *start, void *end)
+static inline int ftrace_text_reserved(const void *start, const void *end)
+{
+ return 0;
+}
+static inline unsigned long ftrace_location(unsigned long ip)
{
return 0;
}
+
+/*
+ * As above, users of functions that take a ftrace_ops may not
+ * have them defined when ftrace is not enabled, but these
+ * functions may still be called. Use macros instead of inlines.
+ */
+#define ftrace_regex_open(ops, flag, inod, file) ({ -ENODEV; })
+#define ftrace_set_early_filter(ops, buf, enable) do { } while (0)
+#define ftrace_set_filter_ip(ops, ip, remove, reset) ({ -ENODEV; })
+#define ftrace_set_filter(ops, buf, len, reset) ({ -ENODEV; })
+#define ftrace_set_notrace(ops, buf, len, reset) ({ -ENODEV; })
+#define ftrace_free_filter(ops) do { } while (0)
+
+static inline ssize_t ftrace_filter_write(struct file *file, const char __user *ubuf,
+ size_t cnt, loff_t *ppos) { return -ENODEV; }
+static inline ssize_t ftrace_notrace_write(struct file *file, const char __user *ubuf,
+ size_t cnt, loff_t *ppos) { return -ENODEV; }
+static inline int
+ftrace_regex_release(struct inode *inode, struct file *file) { return -ENODEV; }
#endif /* CONFIG_DYNAMIC_FTRACE */
/* totally disable ftrace - can not re-enable after this */
@@ -301,25 +612,27 @@ static inline void __ftrace_enabled_restore(int enabled)
#endif
}
-#ifndef HAVE_ARCH_CALLER_ADDR
+/* All archs should have this, but we define it for consistency */
+#ifndef ftrace_return_address0
+# define ftrace_return_address0 __builtin_return_address(0)
+#endif
+
+/* Archs may use other ways for ADDR1 and beyond */
+#ifndef ftrace_return_address
# ifdef CONFIG_FRAME_POINTER
-# define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0))
-# define CALLER_ADDR1 ((unsigned long)__builtin_return_address(1))
-# define CALLER_ADDR2 ((unsigned long)__builtin_return_address(2))
-# define CALLER_ADDR3 ((unsigned long)__builtin_return_address(3))
-# define CALLER_ADDR4 ((unsigned long)__builtin_return_address(4))
-# define CALLER_ADDR5 ((unsigned long)__builtin_return_address(5))
-# define CALLER_ADDR6 ((unsigned long)__builtin_return_address(6))
+# define ftrace_return_address(n) __builtin_return_address(n)
# else
-# define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0))
-# define CALLER_ADDR1 0UL
-# define CALLER_ADDR2 0UL
-# define CALLER_ADDR3 0UL
-# define CALLER_ADDR4 0UL
-# define CALLER_ADDR5 0UL
-# define CALLER_ADDR6 0UL
+# define ftrace_return_address(n) 0UL
# endif
-#endif /* ifndef HAVE_ARCH_CALLER_ADDR */
+#endif
+
+#define CALLER_ADDR0 ((unsigned long)ftrace_return_address0)
+#define CALLER_ADDR1 ((unsigned long)ftrace_return_address(1))
+#define CALLER_ADDR2 ((unsigned long)ftrace_return_address(2))
+#define CALLER_ADDR3 ((unsigned long)ftrace_return_address(3))
+#define CALLER_ADDR4 ((unsigned long)ftrace_return_address(4))
+#define CALLER_ADDR5 ((unsigned long)ftrace_return_address(5))
+#define CALLER_ADDR6 ((unsigned long)ftrace_return_address(6))
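
An arch with a native unwinder more trustworthy than
__builtin_return_address(n) for n > 0 can override the hook from its
asm/ftrace.h before these defines are seen; a hypothetical sketch
(arch_unwind_return_address is an invented name):

	#define ftrace_return_address(n) arch_unwind_return_address(n)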
#ifdef CONFIG_IRQSOFF_TRACER
extern void time_hardirqs_on(unsigned long a0, unsigned long a1);
@@ -333,8 +646,12 @@ static inline void __ftrace_enabled_restore(int enabled)
extern void trace_preempt_on(unsigned long a0, unsigned long a1);
extern void trace_preempt_off(unsigned long a0, unsigned long a1);
#else
- static inline void trace_preempt_on(unsigned long a0, unsigned long a1) { }
- static inline void trace_preempt_off(unsigned long a0, unsigned long a1) { }
+/*
+ * Use defines instead of static inlines because some archs will generate
+ * code for the CALLER_ADDR arguments when we really want these to be real nops.
+ */
+# define trace_preempt_on(a0, a1) do { } while (0)
+# define trace_preempt_off(a0, a1) do { } while (0)
#endif
#ifdef CONFIG_FTRACE_MCOUNT_RECORD
@@ -413,6 +730,7 @@ ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
extern char __irqentry_text_start[];
extern char __irqentry_text_end[];
+#define FTRACE_NOTRACE_DEPTH 65536
#define FTRACE_RETFUNC_DEPTH 50
#define FTRACE_RETSTACK_ALLOC_SIZE 32
extern int register_ftrace_graph(trace_func_graph_ret_t retfunc,
@@ -428,6 +746,7 @@ extern void unregister_ftrace_graph(void);
extern void ftrace_graph_init_task(struct task_struct *t);
extern void ftrace_graph_exit_task(struct task_struct *t);
+extern void ftrace_graph_init_idle_task(struct task_struct *t, int cpu);
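
For reference, a hedged sketch of a graph tracer entry/return pair
(sketch_* names are invented; the trace_func_graph_ent_t and
trace_func_graph_ret_t typedefs live elsewhere in this header):

	static int sketch_graph_entry(struct ftrace_graph_ent *trace)
	{
		return 1;	/* nonzero: also trace this function's return */
	}

	static void sketch_graph_return(struct ftrace_graph_ret *trace)
	{
		/* trace describes the completed call */
	}

	/* register_ftrace_graph(sketch_graph_return, sketch_graph_entry); */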
static inline int task_curr_ret_stack(struct task_struct *t)
{
@@ -451,6 +770,7 @@ static inline void unpause_graph_tracing(void)
static inline void ftrace_graph_init_task(struct task_struct *t) { }
static inline void ftrace_graph_exit_task(struct task_struct *t) { }
+static inline void ftrace_graph_init_idle_task(struct task_struct *t, int cpu) { }
static inline int register_ftrace_graph(trace_func_graph_ret_t retfunc,
trace_func_graph_ent_t entryfunc)
@@ -514,10 +834,15 @@ enum ftrace_dump_mode;
extern enum ftrace_dump_mode ftrace_dump_on_oops;
+extern void disable_trace_on_warning(void);
+extern int __disable_trace_on_warning;
+
#ifdef CONFIG_PREEMPT
#define INIT_TRACE_RECURSION .trace_recursion = 0,
#endif
+#else /* CONFIG_TRACING */
+static inline void disable_trace_on_warning(void) { }
#endif /* CONFIG_TRACING */
#ifndef INIT_TRACE_RECURSION