Diffstat (limited to 'kernel/trace/trace.h')
-rw-r--r--	kernel/trace/trace.h	35
1 file changed, 15 insertions, 20 deletions
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 20572ed88c5..afaae41b0a0 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -130,19 +130,12 @@ enum trace_flag_type {
 
 struct trace_array;
 
-struct trace_cpu {
-	struct trace_array	*tr;
-	struct dentry		*dir;
-	int			cpu;
-};
-
 /*
  * The CPU trace array - it consists of thousands of trace entries
  * plus some other descriptor data: (for example which task started
  * the trace, etc.)
  */
 struct trace_array_cpu {
-	struct trace_cpu	trace_cpu;
 	atomic_t		disabled;
 	void			*buffer_page;	/* ring buffer spare */
 
@@ -196,7 +189,6 @@ struct trace_array {
 	bool			allocated_snapshot;
 #endif
 	int			buffer_disabled;
-	struct trace_cpu	trace_cpu;	/* place holder */
 #ifdef CONFIG_FTRACE_SYSCALLS
 	int			sys_refcount_enter;
 	int			sys_refcount_exit;
@@ -214,7 +206,6 @@ struct trace_array {
 	struct dentry		*event_dir;
 	struct list_head	systems;
 	struct list_head	events;
-	struct task_struct	*waiter;
 	int			ref;
 };
 
@@ -224,6 +215,11 @@ enum {
 
 extern struct list_head ftrace_trace_arrays;
 
+extern struct mutex trace_types_lock;
+
+extern int trace_array_get(struct trace_array *tr);
+extern void trace_array_put(struct trace_array *tr);
+
 /*
  * The global tracer (top) should be the first trace array added,
  * but we check the flag anyway.
@@ -554,11 +550,6 @@ void tracing_iter_reset(struct trace_iterator *iter, int cpu);
 
 void poll_wait_pipe(struct trace_iterator *iter);
 
-void ftrace(struct trace_array *tr,
-			    struct trace_array_cpu *data,
-			    unsigned long ip,
-			    unsigned long parent_ip,
-			    unsigned long flags, int pc);
 void tracing_sched_switch_trace(struct trace_array *tr,
 				struct task_struct *prev,
 				struct task_struct *next,
@@ -680,6 +671,15 @@ extern int trace_selftest_startup_sched_switch(struct tracer *trace,
 					       struct trace_array *tr);
 extern int trace_selftest_startup_branch(struct tracer *trace,
 					 struct trace_array *tr);
+/*
+ * Tracer data references selftest functions that only occur
+ * on boot up. These can be __init functions. Thus, when selftests
+ * are enabled, then the tracers need to reference __init functions.
+ */
+#define __tracer_data		__refdata
+#else
+/* Tracers are seldom changed. Optimize when selftests are disabled. */
+#define __tracer_data		__read_mostly
 #endif /* CONFIG_FTRACE_STARTUP_TEST */
 
 extern void *head_page(struct trace_array_cpu *data);
@@ -774,6 +774,7 @@ print_graph_function_flags(struct trace_iterator *iter, u32 flags)
 extern struct list_head ftrace_pids;
 
 #ifdef CONFIG_FUNCTION_TRACER
+extern bool ftrace_filter_param __initdata;
 static inline int ftrace_trace_task(struct task_struct *task)
 {
 	if (list_empty(&ftrace_pids))
@@ -899,12 +900,6 @@ static inline void trace_branch_disable(void)
 /* set ring buffers to default size if not already done so */
 int tracing_update_buffers(void);
 
-/* trace event type bit fields, not numeric */
-enum {
-	TRACE_EVENT_TYPE_PRINTF		= 1,
-	TRACE_EVENT_TYPE_RAW		= 2,
-};
-
 struct ftrace_event_field {
 	struct list_head	link;
 	const char		*name;
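The new __tracer_data annotation is meant for statically defined struct tracer instances: when CONFIG_FTRACE_STARTUP_TEST is enabled a tracer may reference __init selftest code and therefore needs __refdata, otherwise it is effectively read-only after boot and can be placed in __read_mostly. A minimal sketch of how a tracer definition might pick up the annotation; the tracer name and callbacks below are illustrative placeholders, not part of this patch:

    /* Hypothetical tracer definition using the __tracer_data annotation above. */
    static struct tracer sample_tracer __tracer_data = {
    	.name		= "sample",
    	.init		= sample_tracer_init,		/* placeholder callbacks */
    	.reset		= sample_tracer_reset,
    #ifdef CONFIG_FTRACE_STARTUP_TEST
    	.selftest	= sample_tracer_selftest,	/* may be an __init function */
    #endif
    };

With selftests enabled, the structure references the (possibly __init) selftest routine, which is why plain __read_mostly would trigger section-mismatch warnings; __refdata marks that reference as intentional, while the non-selftest build keeps the read-mostly optimization.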
