Diffstat (limited to 'kernel/trace/ftrace.c')
 kernel/trace/ftrace.c | 377
 1 file changed, 244 insertions(+), 133 deletions(-)
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 72a0f81dc5a..1fd4b947921 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -85,6 +85,8 @@ int function_trace_stop __read_mostly;
/* Current function tracing op */
struct ftrace_ops *function_trace_op __read_mostly = &ftrace_list_end;
+/* What to set function_trace_op to */
+static struct ftrace_ops *set_function_trace_op;
/* List for set_ftrace_pid's pids. */
LIST_HEAD(ftrace_pids);
@@ -235,14 +237,13 @@ static int control_ops_alloc(struct ftrace_ops *ops)
return 0;
}
-static void control_ops_free(struct ftrace_ops *ops)
-{
- free_percpu(ops->disabled);
-}
-
static void update_global_ops(void)
{
- ftrace_func_t func;
+ ftrace_func_t func = ftrace_global_list_func;
+ void *private = NULL;
+
+ /* The list has its own recursion protection. */
+ global_ops.flags |= FTRACE_OPS_FL_RECURSION_SAFE;
/*
* If there's only one function registered, then call that
@@ -252,23 +253,17 @@ static void update_global_ops(void)
if (ftrace_global_list == &ftrace_list_end ||
ftrace_global_list->next == &ftrace_list_end) {
func = ftrace_global_list->func;
+ private = ftrace_global_list->private;
/*
* As we are calling the function directly,
* if it does not have recursion protection,
* the function_trace_op needs to be updated
* accordingly.
*/
- if (ftrace_global_list->flags & FTRACE_OPS_FL_RECURSION_SAFE)
- global_ops.flags |= FTRACE_OPS_FL_RECURSION_SAFE;
- else
+ if (!(ftrace_global_list->flags & FTRACE_OPS_FL_RECURSION_SAFE))
global_ops.flags &= ~FTRACE_OPS_FL_RECURSION_SAFE;
- } else {
- func = ftrace_global_list_func;
- /* The list has its own recursion protection. */
- global_ops.flags |= FTRACE_OPS_FL_RECURSION_SAFE;
}
-
/* If we filter on pids, update to use the pid function */
if (!list_empty(&ftrace_pids)) {
set_ftrace_pid_function(func);
@@ -276,8 +271,32 @@ static void update_global_ops(void)
}
global_ops.func = func;
+ global_ops.private = private;
+}
+
+static void ftrace_sync(struct work_struct *work)
+{
+ /*
+ * This function is just a stub to implement a hard force
+ * of synchronize_sched(). This requires synchronizing
+ * tasks even in userspace and idle.
+ *
+ * Yes, function tracing is rude.
+ */
}
+static void ftrace_sync_ipi(void *data)
+{
+ /* Probably not needed, but do it anyway */
+ smp_rmb();
+}
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+static void update_function_graph_func(void);
+#else
+static inline void update_function_graph_func(void) { }
+#endif
+
static void update_ftrace_function(void)
{
ftrace_func_t func;
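The two helpers moved up above exist only for their side effects: ftrace_sync() is an empty work callback, so schedule_on_each_cpu(ftrace_sync) returns only once every CPU has scheduled and run it, acting as a hard synchronize_sched() that also covers idle and userspace; ftrace_sync_ipi() gives remote CPUs a read barrier. A minimal sketch of the pattern they enable (the wrapper name below is illustrative, not something this patch adds):

/*
 * Illustrative sketch only; ftrace_hard_sync() is a made-up wrapper.
 * schedule_on_each_cpu() queues a no-op work item on every CPU and
 * waits for all of them to run, so by the time it returns every CPU
 * has passed through the scheduler and cannot still be executing a
 * tracer callback it picked up before the call.
 */
static int ftrace_hard_sync(void)
{
	return schedule_on_each_cpu(ftrace_sync);
}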
@@ -296,16 +315,61 @@ static void update_ftrace_function(void)
!FTRACE_FORCE_LIST_FUNC)) {
/* Set the ftrace_ops that the arch callback uses */
if (ftrace_ops_list == &global_ops)
- function_trace_op = ftrace_global_list;
+ set_function_trace_op = ftrace_global_list;
else
- function_trace_op = ftrace_ops_list;
+ set_function_trace_op = ftrace_ops_list;
func = ftrace_ops_list->func;
} else {
/* Just use the default ftrace_ops */
- function_trace_op = &ftrace_list_end;
+ set_function_trace_op = &ftrace_list_end;
func = ftrace_ops_list_func;
}
+ /* If there's no change, then do nothing more here */
+ if (ftrace_trace_function == func)
+ return;
+
+ update_function_graph_func();
+
+ /*
+ * If we are using the list function, it doesn't care
+ * about the function_trace_ops.
+ */
+ if (func == ftrace_ops_list_func) {
+ ftrace_trace_function = func;
+ /*
+ * Don't even bother setting function_trace_ops,
+ * it would be racy to do so anyway.
+ */
+ return;
+ }
+
+#ifndef CONFIG_DYNAMIC_FTRACE
+ /*
+ * For static tracing, we need to be a bit more careful.
+ * The function change takes effect immediately. Thus,
+ * we need to coordinate the setting of the function_trace_op
+ * with the setting of the ftrace_trace_function.
+ *
+ * Set the function to the list ops, which will call the
+ * function we want, albeit indirectly, but it handles the
+ * ftrace_ops and doesn't depend on function_trace_op.
+ */
+ ftrace_trace_function = ftrace_ops_list_func;
+ /*
+ * Make sure all CPUs see this. Yes this is slow, but static
+ * tracing is slow and nasty to have enabled.
+ */
+ schedule_on_each_cpu(ftrace_sync);
+ /* Now all cpus are using the list ops. */
+ function_trace_op = set_function_trace_op;
+ /* Make sure the function_trace_op is visible on all CPUs */
+ smp_wmb();
+ /* Nasty way to force an rmb on all CPUs */
+ smp_call_function(ftrace_sync_ipi, NULL, 1);
+ /* OK, we are all set to update the ftrace_trace_function now! */
+#endif /* !CONFIG_DYNAMIC_FTRACE */
+
ftrace_trace_function = func;
}
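The ordering dance in the !CONFIG_DYNAMIC_FTRACE branch above is easier to see from the reader's side. A hedged sketch of what a static ftrace_caller effectively does (real implementations are arch-specific assembly; this C version is only illustrative):

/*
 * Simplified, hypothetical reader.  If the two loads below could observe
 * a new ftrace_trace_function together with a stale function_trace_op,
 * the callback would run against the wrong ops.  Parking everyone on
 * ftrace_ops_list_func (which ignores function_trace_op), publishing the
 * op with smp_wmb() plus an IPI, and only then installing the final
 * function closes that window.
 */
static void static_ftrace_caller_sketch(unsigned long ip, unsigned long parent_ip)
{
	struct ftrace_ops *op = function_trace_op;	/* may be reordered ... */
	ftrace_func_t func = ftrace_trace_function;	/* ... against this load */

	func(ip, parent_ip, op, NULL);
}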
@@ -367,6 +431,9 @@ static int remove_ftrace_list_ops(struct ftrace_ops **list,
static int __register_ftrace_function(struct ftrace_ops *ops)
{
+ if (ops->flags & FTRACE_OPS_FL_DELETED)
+ return -EINVAL;
+
if (FTRACE_WARN_ON(ops == &global_ops))
return -EINVAL;
@@ -410,17 +477,6 @@ static int __register_ftrace_function(struct ftrace_ops *ops)
return 0;
}
-static void ftrace_sync(struct work_struct *work)
-{
- /*
- * This function is just a stub to implement a hard force
- * of synchronize_sched(). This requires synchronizing
- * tasks even in userspace and idle.
- *
- * Yes, function tracing is rude.
- */
-}
-
static int __unregister_ftrace_function(struct ftrace_ops *ops)
{
int ret;
@@ -439,20 +495,6 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops)
} else if (ops->flags & FTRACE_OPS_FL_CONTROL) {
ret = remove_ftrace_list_ops(&ftrace_control_list,
&control_ops, ops);
- if (!ret) {
- /*
- * The ftrace_ops is now removed from the list,
- * so there'll be no new users. We must ensure
- * all current users are done before we free
- * the control data.
- * Note synchronize_sched() is not enough, as we
- * use preempt_disable() to do RCU, but the function
- * tracer can be called where RCU is not active
- * (before user_exit()).
- */
- schedule_on_each_cpu(ftrace_sync);
- control_ops_free(ops);
- }
} else
ret = remove_ftrace_ops(&ftrace_ops_list, ops);
@@ -462,17 +504,6 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops)
if (ftrace_enabled)
update_ftrace_function();
- /*
- * Dynamic ops may be freed, we must make sure that all
- * callers are done before leaving this function.
- *
- * Again, normal synchronize_sched() is not good enough.
- * We need to do a hard force of sched synchronization.
- */
- if (ops->flags & FTRACE_OPS_FL_DYNAMIC)
- schedule_on_each_cpu(ftrace_sync);
-
-
return 0;
}
@@ -1082,19 +1113,6 @@ static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
static struct pid * const ftrace_swapper_pid = &init_struct_pid;
-loff_t
-ftrace_filter_lseek(struct file *file, loff_t offset, int whence)
-{
- loff_t ret;
-
- if (file->f_mode & FMODE_READ)
- ret = seq_lseek(file, offset, whence);
- else
- file->f_pos = ret = 1;
-
- return ret;
-}
-
#ifdef CONFIG_DYNAMIC_FTRACE
#ifndef CONFIG_FTRACE_MCOUNT_RECORD
@@ -1151,8 +1169,6 @@ struct ftrace_page {
int size;
};
-static struct ftrace_page *ftrace_new_pgs;
-
#define ENTRY_SIZE sizeof(struct dyn_ftrace)
#define ENTRIES_PER_PAGE (PAGE_SIZE / ENTRY_SIZE)
@@ -1539,7 +1555,7 @@ unsigned long ftrace_location(unsigned long ip)
* the function tracer. It checks the ftrace internal tables to
* determine if the address belongs or not.
*/
-int ftrace_text_reserved(void *start, void *end)
+int ftrace_text_reserved(const void *start, const void *end)
{
unsigned long ret;
@@ -1973,6 +1989,7 @@ int __weak ftrace_arch_code_modify_post_process(void)
void ftrace_modify_all_code(int command)
{
int update = command & FTRACE_UPDATE_TRACE_FUNC;
+ int err = 0;
/*
* If the ftrace_caller calls a ftrace_ops func directly,
@@ -1984,21 +2001,33 @@ void ftrace_modify_all_code(int command)
* to make sure the ops are having the right functions
* traced.
*/
- if (update)
- ftrace_update_ftrace_func(ftrace_ops_list_func);
+ if (update) {
+ err = ftrace_update_ftrace_func(ftrace_ops_list_func);
+ if (FTRACE_WARN_ON(err))
+ return;
+ }
if (command & FTRACE_UPDATE_CALLS)
ftrace_replace_code(1);
else if (command & FTRACE_DISABLE_CALLS)
ftrace_replace_code(0);
- if (update && ftrace_trace_function != ftrace_ops_list_func)
- ftrace_update_ftrace_func(ftrace_trace_function);
+ if (update && ftrace_trace_function != ftrace_ops_list_func) {
+ function_trace_op = set_function_trace_op;
+ smp_wmb();
+ /* If irqs are disabled, we are in stop machine */
+ if (!irqs_disabled())
+ smp_call_function(ftrace_sync_ipi, NULL, 1);
+ err = ftrace_update_ftrace_func(ftrace_trace_function);
+ if (FTRACE_WARN_ON(err))
+ return;
+ }
if (command & FTRACE_START_FUNC_RET)
- ftrace_enable_ftrace_graph_caller();
+ err = ftrace_enable_ftrace_graph_caller();
else if (command & FTRACE_STOP_FUNC_RET)
- ftrace_disable_ftrace_graph_caller();
+ err = ftrace_disable_ftrace_graph_caller();
+ FTRACE_WARN_ON(err);
}
static int __ftrace_modify_code(void *data)
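Within ftrace_modify_all_code(), the same publish-then-barrier pattern is reused, but the IPI is conditional. A hedged restatement of the reasoning (this helper is illustrative only; the real code is inlined above):

/*
 * Illustrative only.  When the update runs inside stop_machine(), IRQs
 * are disabled and every other CPU is spinning outside any tracer
 * callback, so smp_wmb() alone is enough and an IPI could not be
 * serviced anyway.  When the arch modifies code without stop_machine()
 * (IRQs enabled), the IPI forces a read barrier on all CPUs before the
 * new ftrace_trace_function is installed.
 */
static void publish_trace_op_sketch(void)
{
	function_trace_op = set_function_trace_op;
	smp_wmb();
	if (!irqs_disabled())
		smp_call_function(ftrace_sync_ipi, NULL, 1);
}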
@@ -2066,6 +2095,11 @@ static ftrace_func_t saved_ftrace_func;
static int ftrace_start_up;
static int global_start_up;
+static void control_ops_free(struct ftrace_ops *ops)
+{
+ free_percpu(ops->disabled);
+}
+
static void ftrace_startup_enable(int command)
{
if (saved_ftrace_func != ftrace_trace_function) {
@@ -2156,10 +2190,41 @@ static int ftrace_shutdown(struct ftrace_ops *ops, int command)
command |= FTRACE_UPDATE_TRACE_FUNC;
}
- if (!command || !ftrace_enabled)
+ if (!command || !ftrace_enabled) {
+ /*
+ * If these are control ops, they still need their
+ * per_cpu field freed. Since function tracing is
+ * not currently active, we can just free them
+ * without synchronizing all CPUs.
+ */
+ if (ops->flags & FTRACE_OPS_FL_CONTROL)
+ control_ops_free(ops);
return 0;
+ }
ftrace_run_update_code(command);
+
+ /*
+ * Dynamic ops may be freed, we must make sure that all
+ * callers are done before leaving this function.
+ * The same goes for freeing the per_cpu data of the control
+ * ops.
+ *
+ * Again, normal synchronize_sched() is not good enough.
+ * We need to do a hard force of sched synchronization.
+ * This is because we use preempt_disable() to do RCU, but
+ * the function tracers can be called where RCU is not watching
+ * (like before user_exit()). We cannot rely on the RCU
+ * infrastructure to do the synchronization, thus we must do it
+ * ourselves.
+ */
+ if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_CONTROL)) {
+ schedule_on_each_cpu(ftrace_sync);
+
+ if (ops->flags & FTRACE_OPS_FL_CONTROL)
+ control_ops_free(ops);
+ }
+
return 0;
}
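Moving the hard synchronization (and control_ops_free()) into ftrace_shutdown() means an ops is guaranteed quiescent by the time unregistration returns. A hypothetical caller, to show the resulting contract (the my_probe_* name is not from the patch):

/*
 * Hypothetical caller, for illustration only.  ftrace_shutdown() now
 * waits for every CPU to schedule before returning and frees the
 * per-CPU 'disabled' counter of FTRACE_OPS_FL_CONTROL ops itself, so
 * the ops and anything its callback touches may be freed as soon as
 * unregister_ftrace_function() returns.
 */
static void my_probe_exit(struct ftrace_ops *ops)
{
	unregister_ftrace_function(ops);	/* waits out all in-flight callers */
	kfree(ops);				/* now safe to free */
}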
@@ -2186,7 +2251,6 @@ static void ftrace_shutdown_sysctl(void)
}
static cycle_t ftrace_update_time;
-static unsigned long ftrace_update_cnt;
unsigned long ftrace_update_tot_cnt;
static inline int ops_traces_mod(struct ftrace_ops *ops)
@@ -2242,11 +2306,12 @@ static int referenced_filters(struct dyn_ftrace *rec)
return cnt;
}
-static int ftrace_update_code(struct module *mod)
+static int ftrace_update_code(struct module *mod, struct ftrace_page *new_pgs)
{
struct ftrace_page *pg;
struct dyn_ftrace *p;
cycle_t start, stop;
+ unsigned long update_cnt = 0;
unsigned long ref = 0;
bool test = false;
int i;
@@ -2272,9 +2337,8 @@ static int ftrace_update_code(struct module *mod)
}
start = ftrace_now(raw_smp_processor_id());
- ftrace_update_cnt = 0;
- for (pg = ftrace_new_pgs; pg; pg = pg->next) {
+ for (pg = new_pgs; pg; pg = pg->next) {
for (i = 0; i < pg->index; i++) {
int cnt = ref;
@@ -2295,7 +2359,7 @@ static int ftrace_update_code(struct module *mod)
if (!ftrace_code_disable(mod, p))
break;
- ftrace_update_cnt++;
+ update_cnt++;
/*
* If the tracing is enabled, go ahead and enable the record.
@@ -2314,11 +2378,9 @@ static int ftrace_update_code(struct module *mod)
}
}
- ftrace_new_pgs = NULL;
-
stop = ftrace_now(raw_smp_processor_id());
ftrace_update_time = stop - start;
- ftrace_update_tot_cnt += ftrace_update_cnt;
+ ftrace_update_tot_cnt += update_cnt;
return 0;
}
@@ -2410,22 +2472,6 @@ ftrace_allocate_pages(unsigned long num_to_init)
return NULL;
}
-static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
-{
- int cnt;
-
- if (!num_to_init) {
- pr_info("ftrace: No functions to be traced?\n");
- return -1;
- }
-
- cnt = num_to_init / ENTRIES_PER_PAGE;
- pr_info("ftrace: allocating %ld entries in %d pages\n",
- num_to_init, cnt + 1);
-
- return 0;
-}
-
#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */
struct ftrace_iterator {
@@ -2739,7 +2785,7 @@ static void ftrace_filter_reset(struct ftrace_hash *hash)
* routine, you can use ftrace_filter_write() for the write
* routine if @flag has FTRACE_ITER_FILTER set, or
* ftrace_notrace_write() if @flag has FTRACE_ITER_NOTRACE set.
- * ftrace_filter_lseek() should be used as the lseek routine, and
+ * tracing_lseek() should be used as the lseek routine, and
* release must call ftrace_regex_release().
*/
int
@@ -2813,7 +2859,9 @@ ftrace_regex_open(struct ftrace_ops *ops, int flag,
static int
ftrace_filter_open(struct inode *inode, struct file *file)
{
- return ftrace_regex_open(&global_ops,
+ struct ftrace_ops *ops = inode->i_private;
+
+ return ftrace_regex_open(ops,
FTRACE_ITER_FILTER | FTRACE_ITER_DO_HASH,
inode, file);
}
@@ -2821,7 +2869,9 @@ ftrace_filter_open(struct inode *inode, struct file *file)
static int
ftrace_notrace_open(struct inode *inode, struct file *file)
{
- return ftrace_regex_open(&global_ops, FTRACE_ITER_NOTRACE,
+ struct ftrace_ops *ops = inode->i_private;
+
+ return ftrace_regex_open(ops, FTRACE_ITER_NOTRACE,
inode, file);
}
@@ -3767,7 +3817,7 @@ static const struct file_operations ftrace_filter_fops = {
.open = ftrace_filter_open,
.read = seq_read,
.write = ftrace_filter_write,
- .llseek = ftrace_filter_lseek,
+ .llseek = tracing_lseek,
.release = ftrace_regex_release,
};
@@ -3775,7 +3825,7 @@ static const struct file_operations ftrace_notrace_fops = {
.open = ftrace_notrace_open,
.read = seq_read,
.write = ftrace_notrace_write,
- .llseek = ftrace_filter_lseek,
+ .llseek = tracing_lseek,
.release = ftrace_regex_release,
};
@@ -4038,7 +4088,7 @@ static const struct file_operations ftrace_graph_fops = {
.open = ftrace_graph_open,
.read = seq_read,
.write = ftrace_graph_write,
- .llseek = ftrace_filter_lseek,
+ .llseek = tracing_lseek,
.release = ftrace_graph_release,
};
@@ -4046,11 +4096,41 @@ static const struct file_operations ftrace_graph_notrace_fops = {
.open = ftrace_graph_notrace_open,
.read = seq_read,
.write = ftrace_graph_write,
- .llseek = ftrace_filter_lseek,
+ .llseek = tracing_lseek,
.release = ftrace_graph_release,
};
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
+void ftrace_create_filter_files(struct ftrace_ops *ops,
+ struct dentry *parent)
+{
+
+ trace_create_file("set_ftrace_filter", 0644, parent,
+ ops, &ftrace_filter_fops);
+
+ trace_create_file("set_ftrace_notrace", 0644, parent,
+ ops, &ftrace_notrace_fops);
+}
+
+/*
+ * The name "destroy_filter_files" is really a misnomer. Although
+ * it may actually delete the files in the future, for now it is
+ * really intended to make sure the ops passed in are disabled
+ * and that when this function returns, the caller is free to
+ * free the ops.
+ *
+ * The "destroy" name is only to match the "create" name that this
+ * should be paired with.
+ */
+void ftrace_destroy_filter_files(struct ftrace_ops *ops)
+{
+ mutex_lock(&ftrace_lock);
+ if (ops->flags & FTRACE_OPS_FL_ENABLED)
+ ftrace_shutdown(ops, 0);
+ ops->flags |= FTRACE_OPS_FL_DELETED;
+ mutex_unlock(&ftrace_lock);
+}
+
static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
{
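The new pair ftrace_create_filter_files()/ftrace_destroy_filter_files() lets a tracer expose filter files bound to its own ftrace_ops instead of sharing the global one. A hedged usage sketch (the my_tracer_* names are hypothetical, not part of the patch):

/* Hypothetical tracer ops, for illustration only. */
static void my_tracer_func(unsigned long ip, unsigned long parent_ip,
			   struct ftrace_ops *op, struct pt_regs *regs)
{
}

static struct ftrace_ops my_tracer_ops = {
	.func	= my_tracer_func,
	.flags	= FTRACE_OPS_FL_RECURSION_SAFE,
};

static void my_tracer_debugfs_init(struct dentry *parent)
{
	/* Creates set_ftrace_filter/set_ftrace_notrace bound to this ops. */
	ftrace_create_filter_files(&my_tracer_ops, parent);
}

static void my_tracer_cleanup(void)
{
	/* Shuts the ops down if enabled and marks it FTRACE_OPS_FL_DELETED. */
	ftrace_destroy_filter_files(&my_tracer_ops);
}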
@@ -4060,11 +4140,7 @@ static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
trace_create_file("enabled_functions", 0444,
d_tracer, NULL, &ftrace_enabled_fops);
- trace_create_file("set_ftrace_filter", 0644, d_tracer,
- NULL, &ftrace_filter_fops);
-
- trace_create_file("set_ftrace_notrace", 0644, d_tracer,
- NULL, &ftrace_notrace_fops);
+ ftrace_create_filter_files(&global_ops, d_tracer);
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
trace_create_file("set_graph_function", 0444, d_tracer,
@@ -4180,9 +4256,6 @@ static int ftrace_process_locs(struct module *mod,
/* Assign the last page to ftrace_pages */
ftrace_pages = pg;
- /* These new locations need to be initialized */
- ftrace_new_pgs = start_pg;
-
/*
* We only need to disable interrupts on start up
* because we are modifying code that an interrupt
@@ -4193,7 +4266,7 @@ static int ftrace_process_locs(struct module *mod,
*/
if (!mod)
local_irq_save(flags);
- ftrace_update_code(mod);
+ ftrace_update_code(mod, start_pg);
if (!mod)
local_irq_restore(flags);
ret = 0;
@@ -4302,30 +4375,27 @@ struct notifier_block ftrace_module_exit_nb = {
.priority = INT_MIN, /* Run after anything that can remove kprobes */
};
-extern unsigned long __start_mcount_loc[];
-extern unsigned long __stop_mcount_loc[];
-
void __init ftrace_init(void)
{
- unsigned long count, addr, flags;
+ extern unsigned long __start_mcount_loc[];
+ extern unsigned long __stop_mcount_loc[];
+ unsigned long count, flags;
int ret;
- /* Keep the ftrace pointer to the stub */
- addr = (unsigned long)ftrace_stub;
-
local_irq_save(flags);
- ftrace_dyn_arch_init(&addr);
+ ret = ftrace_dyn_arch_init();
local_irq_restore(flags);
-
- /* ftrace_dyn_arch_init places the return code in addr */
- if (addr)
+ if (ret)
goto failed;
count = __stop_mcount_loc - __start_mcount_loc;
-
- ret = ftrace_dyn_table_alloc(count);
- if (ret)
+ if (!count) {
+ pr_info("ftrace: No functions to be traced?\n");
goto failed;
+ }
+
+ pr_info("ftrace: allocating %ld entries in %ld pages\n",
+ count, count / ENTRIES_PER_PAGE + 1);
last_ftrace_enabled = ftrace_enabled = 1;
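For a concrete sense of the numbers the new pr_info() prints, a worked example (the entry size is an assumption; struct dyn_ftrace is arch and config dependent):

/*
 * Worked example with assumed sizes, not values from the patch:
 *
 *	PAGE_SIZE = 4096, sizeof(struct dyn_ftrace) = 16
 *	ENTRIES_PER_PAGE = 4096 / 16 = 256
 *	count = 30000  =>  30000 / 256 + 1 = 118
 *
 * giving a boot message like
 * "ftrace: allocating 30000 entries in 118 pages".
 */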
@@ -4373,7 +4443,13 @@ static inline void ftrace_startup_enable(int command) { }
(ops)->flags |= FTRACE_OPS_FL_ENABLED; \
___ret; \
})
-# define ftrace_shutdown(ops, command) __unregister_ftrace_function(ops)
+# define ftrace_shutdown(ops, command) \
+ ({ \
+ int ___ret = __unregister_ftrace_function(ops); \
+ if (!___ret) \
+ (ops)->flags &= ~FTRACE_OPS_FL_ENABLED; \
+ ___ret; \
+ })
# define ftrace_startup_sysctl() do { } while (0)
# define ftrace_shutdown_sysctl() do { } while (0)
@@ -4719,7 +4795,7 @@ static const struct file_operations ftrace_pid_fops = {
.open = ftrace_pid_open,
.write = ftrace_pid_write,
.read = seq_read,
- .llseek = ftrace_filter_lseek,
+ .llseek = tracing_lseek,
.release = ftrace_pid_release,
};
@@ -4862,6 +4938,7 @@ int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
trace_func_graph_ret_t ftrace_graph_return =
(trace_func_graph_ret_t)ftrace_stub;
trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;
+static trace_func_graph_ent_t __ftrace_graph_entry = ftrace_graph_entry_stub;
/* Try to assign a return stack array on FTRACE_RETSTACK_ALLOC_SIZE tasks. */
static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
@@ -5003,6 +5080,30 @@ static struct ftrace_ops fgraph_ops __read_mostly = {
FTRACE_OPS_FL_RECURSION_SAFE,
};
+static int ftrace_graph_entry_test(struct ftrace_graph_ent *trace)
+{
+ if (!ftrace_ops_test(&global_ops, trace->func, NULL))
+ return 0;
+ return __ftrace_graph_entry(trace);
+}
+
+/*
+ * The function graph tracer should only trace the functions defined
+ * by set_ftrace_filter and set_ftrace_notrace. If another function
+ * tracer ops is registered, the graph tracer must test each
+ * function against the global ops, and not just trace any function
+ * that any registered ftrace_ops wants traced.
+ */
+static void update_function_graph_func(void)
+{
+ if (ftrace_ops_list == &ftrace_list_end ||
+ (ftrace_ops_list == &global_ops &&
+ global_ops.next == &ftrace_list_end))
+ ftrace_graph_entry = __ftrace_graph_entry;
+ else
+ ftrace_graph_entry = ftrace_graph_entry_test;
+}
+
int register_ftrace_graph(trace_func_graph_ret_t retfunc,
trace_func_graph_ent_t entryfunc)
{
@@ -5027,7 +5128,16 @@ int register_ftrace_graph(trace_func_graph_ret_t retfunc,
}
ftrace_graph_return = retfunc;
- ftrace_graph_entry = entryfunc;
+
+ /*
+ * Set the indirect __ftrace_graph_entry to the entryfunc, and
+ * point ftrace_graph_entry at the entry test first. Then call
+ * update_function_graph_func() to determine whether the
+ * entryfunc can be called directly or not.
+ */
+ __ftrace_graph_entry = entryfunc;
+ ftrace_graph_entry = ftrace_graph_entry_test;
+ update_function_graph_func();
ret = ftrace_startup(&fgraph_ops, FTRACE_START_FUNC_RET);
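From the API side nothing changes for callers of register_ftrace_graph(); the filtering indirection is internal. A hypothetical caller for reference (the my_graph_* names are illustrative only):

/*
 * Hypothetical caller, for illustration only.  With the entry-test
 * indirection above, the entry handler is invoked only for functions
 * that pass the global_ops filter whenever other ftrace_ops are also
 * registered.
 */
static int my_graph_entry(struct ftrace_graph_ent *trace)
{
	return 1;	/* trace this function's children */
}

static void my_graph_return(struct ftrace_graph_ret *trace)
{
}

static int __init my_graph_init(void)
{
	return register_ftrace_graph(my_graph_return, my_graph_entry);
}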
@@ -5046,6 +5156,7 @@ void unregister_ftrace_graph(void)
ftrace_graph_active--;
ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
ftrace_graph_entry = ftrace_graph_entry_stub;
+ __ftrace_graph_entry = ftrace_graph_entry_stub;
ftrace_shutdown(&fgraph_ops, FTRACE_STOP_FUNC_RET);
unregister_pm_notifier(&ftrace_suspend_notifier);
unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);