diff options
Diffstat (limited to 'include/trace')
61 files changed, 13000 insertions, 902 deletions
diff --git a/include/trace/define_trace.h b/include/trace/define_trace.h index 1dfab540151..02e1003568a 100644 --- a/include/trace/define_trace.h +++ b/include/trace/define_trace.h @@ -1,5 +1,5 @@  /* - * Trace files that want to automate creationg of all tracepoints defined + * Trace files that want to automate creation of all tracepoints defined   * in their file should include this file. The following are macros that the   * trace file may define:   * @@ -26,6 +26,15 @@  #define TRACE_EVENT(name, proto, args, tstruct, assign, print)	\  	DEFINE_TRACE(name) +#undef TRACE_EVENT_CONDITION +#define TRACE_EVENT_CONDITION(name, proto, args, cond, tstruct, assign, print) \ +	TRACE_EVENT(name,						\ +		PARAMS(proto),						\ +		PARAMS(args),						\ +		PARAMS(tstruct),					\ +		PARAMS(assign),						\ +		PARAMS(print)) +  #undef TRACE_EVENT_FN  #define TRACE_EVENT_FN(name, proto, args, tstruct,		\  		assign, print, reg, unreg)			\ @@ -35,10 +44,18 @@  #define DEFINE_EVENT(template, name, proto, args) \  	DEFINE_TRACE(name) +#undef DEFINE_EVENT_FN +#define DEFINE_EVENT_FN(template, name, proto, args, reg, unreg) \ +	DEFINE_TRACE_FN(name, reg, unreg) +  #undef DEFINE_EVENT_PRINT  #define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\  	DEFINE_TRACE(name) +#undef DEFINE_EVENT_CONDITION +#define DEFINE_EVENT_CONDITION(template, name, proto, args, cond) \ +	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args)) +  #undef DECLARE_TRACE  #define DECLARE_TRACE(name, proto, args)	\  	DEFINE_TRACE(name) @@ -75,9 +92,12 @@  #undef TRACE_EVENT  #undef TRACE_EVENT_FN +#undef TRACE_EVENT_CONDITION  #undef DECLARE_EVENT_CLASS  #undef DEFINE_EVENT +#undef DEFINE_EVENT_FN  #undef DEFINE_EVENT_PRINT +#undef DEFINE_EVENT_CONDITION  #undef TRACE_HEADER_MULTI_READ  #undef DECLARE_TRACE diff --git a/include/trace/events/9p.h b/include/trace/events/9p.h new file mode 100644 index 00000000000..a0666362c11 --- /dev/null +++ b/include/trace/events/9p.h @@ -0,0 +1,154 @@ +#undef 
TRACE_SYSTEM +#define TRACE_SYSTEM 9p + +#if !defined(_TRACE_9P_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_9P_H + +#include <linux/tracepoint.h> + +#define show_9p_op(type)						\ +	__print_symbolic(type,						\ +			 { P9_TLERROR,		"P9_TLERROR" },		\ +			 { P9_RLERROR,		"P9_RLERROR" },		\ +			 { P9_TSTATFS,		"P9_TSTATFS" },		\ +			 { P9_RSTATFS,		"P9_RSTATFS" },		\ +			 { P9_TLOPEN,		"P9_TLOPEN" },		\ +			 { P9_RLOPEN,		"P9_RLOPEN" },		\ +			 { P9_TLCREATE,		"P9_TLCREATE" },	\ +			 { P9_RLCREATE,		"P9_RLCREATE" },	\ +			 { P9_TSYMLINK,		"P9_TSYMLINK" },	\ +			 { P9_RSYMLINK,		"P9_RSYMLINK" },	\ +			 { P9_TMKNOD,		"P9_TMKNOD" },		\ +			 { P9_RMKNOD,		"P9_RMKNOD" },		\ +			 { P9_TRENAME,		"P9_TRENAME" },		\ +			 { P9_RRENAME,		"P9_RRENAME" },		\ +			 { P9_TREADLINK,	"P9_TREADLINK" },	\ +			 { P9_RREADLINK,	"P9_RREADLINK" },	\ +			 { P9_TGETATTR,		"P9_TGETATTR" },	\ +			 { P9_RGETATTR,		"P9_RGETATTR" },	\ +			 { P9_TSETATTR,		"P9_TSETATTR" },	\ +			 { P9_RSETATTR,		"P9_RSETATTR" },	\ +			 { P9_TXATTRWALK,	"P9_TXATTRWALK" },	\ +			 { P9_RXATTRWALK,	"P9_RXATTRWALK" },	\ +			 { P9_TXATTRCREATE,	"P9_TXATTRCREATE" },	\ +			 { P9_RXATTRCREATE,	"P9_RXATTRCREATE" },	\ +			 { P9_TREADDIR,		"P9_TREADDIR" },	\ +			 { P9_RREADDIR,		"P9_RREADDIR" },	\ +			 { P9_TFSYNC,		"P9_TFSYNC" },		\ +			 { P9_RFSYNC,		"P9_RFSYNC" },		\ +			 { P9_TLOCK,		"P9_TLOCK" },		\ +			 { P9_RLOCK,		"P9_RLOCK" },		\ +			 { P9_TGETLOCK,		"P9_TGETLOCK" },	\ +			 { P9_RGETLOCK,		"P9_RGETLOCK" },	\ +			 { P9_TLINK,		"P9_TLINK" },		\ +			 { P9_RLINK,		"P9_RLINK" },		\ +			 { P9_TMKDIR,		"P9_TMKDIR" },		\ +			 { P9_RMKDIR,		"P9_RMKDIR" },		\ +			 { P9_TRENAMEAT,	"P9_TRENAMEAT" },	\ +			 { P9_RRENAMEAT,	"P9_RRENAMEAT" },	\ +			 { P9_TUNLINKAT,	"P9_TUNLINKAT" },	\ +			 { P9_RUNLINKAT,	"P9_RUNLINKAT" },	\ +			 { P9_TVERSION,		"P9_TVERSION" },	\ +			 { P9_RVERSION,		"P9_RVERSION" },	\ +			 { P9_TAUTH,		"P9_TAUTH" },		\ +			 { P9_RAUTH,		"P9_RAUTH" },		\ +			 { P9_TATTACH,		"P9_TATTACH" },		\ +			 { 
P9_RATTACH,		"P9_RATTACH" },		\ +			 { P9_TERROR,		"P9_TERROR" },		\ +			 { P9_RERROR,		"P9_RERROR" },		\ +			 { P9_TFLUSH,		"P9_TFLUSH" },		\ +			 { P9_RFLUSH,		"P9_RFLUSH" },		\ +			 { P9_TWALK,		"P9_TWALK" },		\ +			 { P9_RWALK,		"P9_RWALK" },		\ +			 { P9_TOPEN,		"P9_TOPEN" },		\ +			 { P9_ROPEN,		"P9_ROPEN" },		\ +			 { P9_TCREATE,		"P9_TCREATE" },		\ +			 { P9_RCREATE,		"P9_RCREATE" },		\ +			 { P9_TREAD,		"P9_TREAD" },		\ +			 { P9_RREAD,		"P9_RREAD" },		\ +			 { P9_TWRITE,		"P9_TWRITE" },		\ +			 { P9_RWRITE,		"P9_RWRITE" },		\ +			 { P9_TCLUNK,		"P9_TCLUNK" },		\ +			 { P9_RCLUNK,		"P9_RCLUNK" },		\ +			 { P9_TREMOVE,		"P9_TREMOVE" },		\ +			 { P9_RREMOVE,		"P9_RREMOVE" },		\ +			 { P9_TSTAT,		"P9_TSTAT" },		\ +			 { P9_RSTAT,		"P9_RSTAT" },		\ +			 { P9_TWSTAT,		"P9_TWSTAT" },		\ +			 { P9_RWSTAT,		"P9_RWSTAT" }) + +TRACE_EVENT(9p_client_req, +	    TP_PROTO(struct p9_client *clnt, int8_t type, int tag), + +	    TP_ARGS(clnt, type, tag), + +	    TP_STRUCT__entry( +		    __field(    void *,		clnt			     ) +		    __field(	__u8,		type			     ) +		    __field(	__u32,		tag			     ) +		    ), + +	    TP_fast_assign( +		    __entry->clnt    =  clnt; +		    __entry->type    =  type; +		    __entry->tag     =  tag; +		    ), + +	    TP_printk("client %lu request %s tag  %d", +		    (long)__entry->clnt, show_9p_op(__entry->type), +		    __entry->tag) + ); + +TRACE_EVENT(9p_client_res, +	    TP_PROTO(struct p9_client *clnt, int8_t type, int tag, int err), + +	    TP_ARGS(clnt, type, tag, err), + +	    TP_STRUCT__entry( +		    __field(    void *,		clnt			     ) +		    __field(	__u8,		type			     ) +		    __field(	__u32,		tag			     ) +		    __field(	__u32,		err			     ) +		    ), + +	    TP_fast_assign( +		    __entry->clnt    =  clnt; +		    __entry->type    =  type; +		    __entry->tag     =  tag; +		    __entry->err     =  err; +		    ), + +	    TP_printk("client %lu response %s tag  %d err %d", +		      (long)__entry->clnt, show_9p_op(__entry->type), +		      
__entry->tag, __entry->err) +); + +/* dump 32 bytes of protocol data */ +#define P9_PROTO_DUMP_SZ 32 +TRACE_EVENT(9p_protocol_dump, +	    TP_PROTO(struct p9_client *clnt, struct p9_fcall *pdu), + +	    TP_ARGS(clnt, pdu), + +	    TP_STRUCT__entry( +		    __field(	void *,		clnt				) +		    __field(	__u8,		type				) +		    __field(	__u16,		tag				) +		    __array(	unsigned char,	line,	P9_PROTO_DUMP_SZ	) +		    ), + +	    TP_fast_assign( +		    __entry->clnt   =  clnt; +		    __entry->type   =  pdu->id; +		    __entry->tag    =  pdu->tag; +		    memcpy(__entry->line, pdu->sdata, P9_PROTO_DUMP_SZ); +		    ), +	    TP_printk("clnt %lu %s(tag = %d)\n%.3x: %16ph\n%.3x: %16ph\n", +		      (unsigned long)__entry->clnt, show_9p_op(__entry->type), +		      __entry->tag, 0, __entry->line, 16, __entry->line + 16) + ); + +#endif /* _TRACE_9P_H */ + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/include/trace/events/asoc.h b/include/trace/events/asoc.h new file mode 100644 index 00000000000..c75c795a377 --- /dev/null +++ b/include/trace/events/asoc.h @@ -0,0 +1,319 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM asoc + +#if !defined(_TRACE_ASOC_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_ASOC_H + +#include <linux/ktime.h> +#include <linux/tracepoint.h> + +#define DAPM_DIRECT "(direct)" + +struct snd_soc_jack; +struct snd_soc_codec; +struct snd_soc_card; +struct snd_soc_dapm_widget; +struct snd_soc_dapm_path; + +DECLARE_EVENT_CLASS(snd_soc_card, + +	TP_PROTO(struct snd_soc_card *card, int val), + +	TP_ARGS(card, val), + +	TP_STRUCT__entry( +		__string(	name,		card->name	) +		__field(	int,		val		) +	), + +	TP_fast_assign( +		__assign_str(name, card->name); +		__entry->val = val; +	), + +	TP_printk("card=%s val=%d", __get_str(name), (int)__entry->val) +); + +DEFINE_EVENT(snd_soc_card, snd_soc_bias_level_start, + +	TP_PROTO(struct snd_soc_card *card, int val), + +	TP_ARGS(card, val) + +); + +DEFINE_EVENT(snd_soc_card, 
snd_soc_bias_level_done, + +	TP_PROTO(struct snd_soc_card *card, int val), + +	TP_ARGS(card, val) + +); + +DECLARE_EVENT_CLASS(snd_soc_dapm_basic, + +	TP_PROTO(struct snd_soc_card *card), + +	TP_ARGS(card), + +	TP_STRUCT__entry( +		__string(	name,	card->name	) +	), + +	TP_fast_assign( +		__assign_str(name, card->name); +	), + +	TP_printk("card=%s", __get_str(name)) +); + +DEFINE_EVENT(snd_soc_dapm_basic, snd_soc_dapm_start, + +	TP_PROTO(struct snd_soc_card *card), + +	TP_ARGS(card) + +); + +DEFINE_EVENT(snd_soc_dapm_basic, snd_soc_dapm_done, + +	TP_PROTO(struct snd_soc_card *card), + +	TP_ARGS(card) + +); + +DECLARE_EVENT_CLASS(snd_soc_dapm_widget, + +	TP_PROTO(struct snd_soc_dapm_widget *w, int val), + +	TP_ARGS(w, val), + +	TP_STRUCT__entry( +		__string(	name,	w->name		) +		__field(	int,	val		) +	), + +	TP_fast_assign( +		__assign_str(name, w->name); +		__entry->val = val; +	), + +	TP_printk("widget=%s val=%d", __get_str(name), +		  (int)__entry->val) +); + +DEFINE_EVENT(snd_soc_dapm_widget, snd_soc_dapm_widget_power, + +	TP_PROTO(struct snd_soc_dapm_widget *w, int val), + +	TP_ARGS(w, val) + +); + +DEFINE_EVENT(snd_soc_dapm_widget, snd_soc_dapm_widget_event_start, + +	TP_PROTO(struct snd_soc_dapm_widget *w, int val), + +	TP_ARGS(w, val) + +); + +DEFINE_EVENT(snd_soc_dapm_widget, snd_soc_dapm_widget_event_done, + +	TP_PROTO(struct snd_soc_dapm_widget *w, int val), + +	TP_ARGS(w, val) + +); + +TRACE_EVENT(snd_soc_dapm_walk_done, + +	TP_PROTO(struct snd_soc_card *card), + +	TP_ARGS(card), + +	TP_STRUCT__entry( +		__string(	name,	card->name		) +		__field(	int,	power_checks		) +		__field(	int,	path_checks		) +		__field(	int,	neighbour_checks	) +	), + +	TP_fast_assign( +		__assign_str(name, card->name); +		__entry->power_checks = card->dapm_stats.power_checks; +		__entry->path_checks = card->dapm_stats.path_checks; +		__entry->neighbour_checks = card->dapm_stats.neighbour_checks; +	), + +	TP_printk("%s: checks %d power, %d path, %d neighbour", +		  __get_str(name), 
(int)__entry->power_checks, +		  (int)__entry->path_checks, (int)__entry->neighbour_checks) +); + +TRACE_EVENT(snd_soc_dapm_output_path, + +	TP_PROTO(struct snd_soc_dapm_widget *widget, +		struct snd_soc_dapm_path *path), + +	TP_ARGS(widget, path), + +	TP_STRUCT__entry( +		__string(	wname,	widget->name		) +		__string(	pname,	path->name ? path->name : DAPM_DIRECT) +		__string(	psname,	path->sink->name	) +		__field(	int,	path_sink		) +		__field(	int,	path_connect		) +	), + +	TP_fast_assign( +		__assign_str(wname, widget->name); +		__assign_str(pname, path->name ? path->name : DAPM_DIRECT); +		__assign_str(psname, path->sink->name); +		__entry->path_connect = path->connect; +		__entry->path_sink = (long)path->sink; +	), + +	TP_printk("%c%s -> %s -> %s\n", +		(int) __entry->path_sink && +		(int) __entry->path_connect ? '*' : ' ', +		__get_str(wname), __get_str(pname), __get_str(psname)) +); + +TRACE_EVENT(snd_soc_dapm_input_path, + +	TP_PROTO(struct snd_soc_dapm_widget *widget, +		struct snd_soc_dapm_path *path), + +	TP_ARGS(widget, path), + +	TP_STRUCT__entry( +		__string(	wname,	widget->name		) +		__string(	pname,	path->name ? path->name : DAPM_DIRECT) +		__string(	psname,	path->source->name	) +		__field(	int,	path_source		) +		__field(	int,	path_connect		) +	), + +	TP_fast_assign( +		__assign_str(wname, widget->name); +		__assign_str(pname, path->name ? path->name : DAPM_DIRECT); +		__assign_str(psname, path->source->name); +		__entry->path_connect = path->connect; +		__entry->path_source = (long)path->source; +	), + +	TP_printk("%c%s <- %s <- %s\n", +		(int) __entry->path_source && +		(int) __entry->path_connect ? 
'*' : ' ', +		__get_str(wname), __get_str(pname), __get_str(psname)) +); + +TRACE_EVENT(snd_soc_dapm_connected, + +	TP_PROTO(int paths, int stream), + +	TP_ARGS(paths, stream), + +	TP_STRUCT__entry( +		__field(	int,	paths		) +		__field(	int,	stream		) +	), + +	TP_fast_assign( +		__entry->paths = paths; +		__entry->stream = stream; +	), + +	TP_printk("%s: found %d paths\n", +		__entry->stream ? "capture" : "playback", __entry->paths) +); + +TRACE_EVENT(snd_soc_jack_irq, + +	TP_PROTO(const char *name), + +	TP_ARGS(name), + +	TP_STRUCT__entry( +		__string(	name,	name		) +	), + +	TP_fast_assign( +		__assign_str(name, name); +	), + +	TP_printk("%s", __get_str(name)) +); + +TRACE_EVENT(snd_soc_jack_report, + +	TP_PROTO(struct snd_soc_jack *jack, int mask, int val), + +	TP_ARGS(jack, mask, val), + +	TP_STRUCT__entry( +		__string(	name,		jack->jack->name	) +		__field(	int,		mask			) +		__field(	int,		val			) +	), + +	TP_fast_assign( +		__assign_str(name, jack->jack->name); +		__entry->mask = mask; +		__entry->val = val; +	), + +	TP_printk("jack=%s %x/%x", __get_str(name), (int)__entry->val, +		  (int)__entry->mask) +); + +TRACE_EVENT(snd_soc_jack_notify, + +	TP_PROTO(struct snd_soc_jack *jack, int val), + +	TP_ARGS(jack, val), + +	TP_STRUCT__entry( +		__string(	name,		jack->jack->name	) +		__field(	int,		val			) +	), + +	TP_fast_assign( +		__assign_str(name, jack->jack->name); +		__entry->val = val; +	), + +	TP_printk("jack=%s %x", __get_str(name), (int)__entry->val) +); + +TRACE_EVENT(snd_soc_cache_sync, + +	TP_PROTO(struct snd_soc_codec *codec, const char *type, +		 const char *status), + +	TP_ARGS(codec, type, status), + +	TP_STRUCT__entry( +		__string(	name,		codec->name	) +		__string(	status,		status		) +		__string(	type,		type		) +		__field(	int,		id		) +	), + +	TP_fast_assign( +		__assign_str(name, codec->name); +		__assign_str(status, status); +		__assign_str(type, type); +		__entry->id = codec->id; +	), + +	TP_printk("codec=%s.%d type=%s status=%s", 
__get_str(name), +		  (int)__entry->id, __get_str(type), __get_str(status)) +); + +#endif /* _TRACE_ASOC_H */ + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/include/trace/events/bcache.h b/include/trace/events/bcache.h new file mode 100644 index 00000000000..c9c3c044b32 --- /dev/null +++ b/include/trace/events/bcache.h @@ -0,0 +1,479 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM bcache + +#if !defined(_TRACE_BCACHE_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_BCACHE_H + +#include <linux/tracepoint.h> + +DECLARE_EVENT_CLASS(bcache_request, +	TP_PROTO(struct bcache_device *d, struct bio *bio), +	TP_ARGS(d, bio), + +	TP_STRUCT__entry( +		__field(dev_t,		dev			) +		__field(unsigned int,	orig_major		) +		__field(unsigned int,	orig_minor		) +		__field(sector_t,	sector			) +		__field(dev_t,		orig_sector		) +		__field(unsigned int,	nr_sector		) +		__array(char,		rwbs,	6		) +	), + +	TP_fast_assign( +		__entry->dev		= bio->bi_bdev->bd_dev; +		__entry->orig_major	= d->disk->major; +		__entry->orig_minor	= d->disk->first_minor; +		__entry->sector		= bio->bi_iter.bi_sector; +		__entry->orig_sector	= bio->bi_iter.bi_sector - 16; +		__entry->nr_sector	= bio->bi_iter.bi_size >> 9; +		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size); +	), + +	TP_printk("%d,%d %s %llu + %u (from %d,%d @ %llu)", +		  MAJOR(__entry->dev), MINOR(__entry->dev), +		  __entry->rwbs, (unsigned long long)__entry->sector, +		  __entry->nr_sector, __entry->orig_major, __entry->orig_minor, +		  (unsigned long long)__entry->orig_sector) +); + +DECLARE_EVENT_CLASS(bkey, +	TP_PROTO(struct bkey *k), +	TP_ARGS(k), + +	TP_STRUCT__entry( +		__field(u32,	size				) +		__field(u32,	inode				) +		__field(u64,	offset				) +		__field(bool,	dirty				) +	), + +	TP_fast_assign( +		__entry->inode	= KEY_INODE(k); +		__entry->offset	= KEY_OFFSET(k); +		__entry->size	= KEY_SIZE(k); +		__entry->dirty	= KEY_DIRTY(k); +	), + +	TP_printk("%u:%llu len %u dirty %u", 
__entry->inode, +		  __entry->offset, __entry->size, __entry->dirty) +); + +DECLARE_EVENT_CLASS(btree_node, +	TP_PROTO(struct btree *b), +	TP_ARGS(b), + +	TP_STRUCT__entry( +		__field(size_t,		bucket			) +	), + +	TP_fast_assign( +		__entry->bucket	= PTR_BUCKET_NR(b->c, &b->key, 0); +	), + +	TP_printk("bucket %zu", __entry->bucket) +); + +/* request.c */ + +DEFINE_EVENT(bcache_request, bcache_request_start, +	TP_PROTO(struct bcache_device *d, struct bio *bio), +	TP_ARGS(d, bio) +); + +DEFINE_EVENT(bcache_request, bcache_request_end, +	TP_PROTO(struct bcache_device *d, struct bio *bio), +	TP_ARGS(d, bio) +); + +DECLARE_EVENT_CLASS(bcache_bio, +	TP_PROTO(struct bio *bio), +	TP_ARGS(bio), + +	TP_STRUCT__entry( +		__field(dev_t,		dev			) +		__field(sector_t,	sector			) +		__field(unsigned int,	nr_sector		) +		__array(char,		rwbs,	6		) +	), + +	TP_fast_assign( +		__entry->dev		= bio->bi_bdev->bd_dev; +		__entry->sector		= bio->bi_iter.bi_sector; +		__entry->nr_sector	= bio->bi_iter.bi_size >> 9; +		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size); +	), + +	TP_printk("%d,%d  %s %llu + %u", +		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs, +		  (unsigned long long)__entry->sector, __entry->nr_sector) +); + +DEFINE_EVENT(bcache_bio, bcache_bypass_sequential, +	TP_PROTO(struct bio *bio), +	TP_ARGS(bio) +); + +DEFINE_EVENT(bcache_bio, bcache_bypass_congested, +	TP_PROTO(struct bio *bio), +	TP_ARGS(bio) +); + +TRACE_EVENT(bcache_read, +	TP_PROTO(struct bio *bio, bool hit, bool bypass), +	TP_ARGS(bio, hit, bypass), + +	TP_STRUCT__entry( +		__field(dev_t,		dev			) +		__field(sector_t,	sector			) +		__field(unsigned int,	nr_sector		) +		__array(char,		rwbs,	6		) +		__field(bool,		cache_hit		) +		__field(bool,		bypass			) +	), + +	TP_fast_assign( +		__entry->dev		= bio->bi_bdev->bd_dev; +		__entry->sector		= bio->bi_iter.bi_sector; +		__entry->nr_sector	= bio->bi_iter.bi_size >> 9; +		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size); +		
__entry->cache_hit = hit; +		__entry->bypass = bypass; +	), + +	TP_printk("%d,%d  %s %llu + %u hit %u bypass %u", +		  MAJOR(__entry->dev), MINOR(__entry->dev), +		  __entry->rwbs, (unsigned long long)__entry->sector, +		  __entry->nr_sector, __entry->cache_hit, __entry->bypass) +); + +TRACE_EVENT(bcache_write, +	TP_PROTO(struct bio *bio, bool writeback, bool bypass), +	TP_ARGS(bio, writeback, bypass), + +	TP_STRUCT__entry( +		__field(dev_t,		dev			) +		__field(sector_t,	sector			) +		__field(unsigned int,	nr_sector		) +		__array(char,		rwbs,	6		) +		__field(bool,		writeback		) +		__field(bool,		bypass			) +	), + +	TP_fast_assign( +		__entry->dev		= bio->bi_bdev->bd_dev; +		__entry->sector		= bio->bi_iter.bi_sector; +		__entry->nr_sector	= bio->bi_iter.bi_size >> 9; +		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size); +		__entry->writeback = writeback; +		__entry->bypass = bypass; +	), + +	TP_printk("%d,%d  %s %llu + %u hit %u bypass %u", +		  MAJOR(__entry->dev), MINOR(__entry->dev), +		  __entry->rwbs, (unsigned long long)__entry->sector, +		  __entry->nr_sector, __entry->writeback, __entry->bypass) +); + +DEFINE_EVENT(bcache_bio, bcache_read_retry, +	TP_PROTO(struct bio *bio), +	TP_ARGS(bio) +); + +DEFINE_EVENT(bkey, bcache_cache_insert, +	TP_PROTO(struct bkey *k), +	TP_ARGS(k) +); + +/* Journal */ + +DECLARE_EVENT_CLASS(cache_set, +	TP_PROTO(struct cache_set *c), +	TP_ARGS(c), + +	TP_STRUCT__entry( +		__array(char,		uuid,	16 ) +	), + +	TP_fast_assign( +		memcpy(__entry->uuid, c->sb.set_uuid, 16); +	), + +	TP_printk("%pU", __entry->uuid) +); + +DEFINE_EVENT(bkey, bcache_journal_replay_key, +	TP_PROTO(struct bkey *k), +	TP_ARGS(k) +); + +DEFINE_EVENT(cache_set, bcache_journal_full, +	TP_PROTO(struct cache_set *c), +	TP_ARGS(c) +); + +DEFINE_EVENT(cache_set, bcache_journal_entry_full, +	TP_PROTO(struct cache_set *c), +	TP_ARGS(c) +); + +DEFINE_EVENT(bcache_bio, bcache_journal_write, +	TP_PROTO(struct bio *bio), +	TP_ARGS(bio) +); + +/* Btree */ + 
+DEFINE_EVENT(cache_set, bcache_btree_cache_cannibalize, +	TP_PROTO(struct cache_set *c), +	TP_ARGS(c) +); + +DEFINE_EVENT(btree_node, bcache_btree_read, +	TP_PROTO(struct btree *b), +	TP_ARGS(b) +); + +TRACE_EVENT(bcache_btree_write, +	TP_PROTO(struct btree *b), +	TP_ARGS(b), + +	TP_STRUCT__entry( +		__field(size_t,		bucket			) +		__field(unsigned,	block			) +		__field(unsigned,	keys			) +	), + +	TP_fast_assign( +		__entry->bucket	= PTR_BUCKET_NR(b->c, &b->key, 0); +		__entry->block	= b->written; +		__entry->keys	= b->keys.set[b->keys.nsets].data->keys; +	), + +	TP_printk("bucket %zu", __entry->bucket) +); + +DEFINE_EVENT(btree_node, bcache_btree_node_alloc, +	TP_PROTO(struct btree *b), +	TP_ARGS(b) +); + +DEFINE_EVENT(btree_node, bcache_btree_node_alloc_fail, +	TP_PROTO(struct btree *b), +	TP_ARGS(b) +); + +DEFINE_EVENT(btree_node, bcache_btree_node_free, +	TP_PROTO(struct btree *b), +	TP_ARGS(b) +); + +TRACE_EVENT(bcache_btree_gc_coalesce, +	TP_PROTO(unsigned nodes), +	TP_ARGS(nodes), + +	TP_STRUCT__entry( +		__field(unsigned,	nodes			) +	), + +	TP_fast_assign( +		__entry->nodes	= nodes; +	), + +	TP_printk("coalesced %u nodes", __entry->nodes) +); + +DEFINE_EVENT(cache_set, bcache_gc_start, +	TP_PROTO(struct cache_set *c), +	TP_ARGS(c) +); + +DEFINE_EVENT(cache_set, bcache_gc_end, +	TP_PROTO(struct cache_set *c), +	TP_ARGS(c) +); + +DEFINE_EVENT(bkey, bcache_gc_copy, +	TP_PROTO(struct bkey *k), +	TP_ARGS(k) +); + +DEFINE_EVENT(bkey, bcache_gc_copy_collision, +	TP_PROTO(struct bkey *k), +	TP_ARGS(k) +); + +TRACE_EVENT(bcache_btree_insert_key, +	TP_PROTO(struct btree *b, struct bkey *k, unsigned op, unsigned status), +	TP_ARGS(b, k, op, status), + +	TP_STRUCT__entry( +		__field(u64,	btree_node			) +		__field(u32,	btree_level			) +		__field(u32,	inode				) +		__field(u64,	offset				) +		__field(u32,	size				) +		__field(u8,	dirty				) +		__field(u8,	op				) +		__field(u8,	status				) +	), + +	TP_fast_assign( +		__entry->btree_node = PTR_BUCKET_NR(b->c, &b->key, 0); 
+		__entry->btree_level = b->level; +		__entry->inode	= KEY_INODE(k); +		__entry->offset	= KEY_OFFSET(k); +		__entry->size	= KEY_SIZE(k); +		__entry->dirty	= KEY_DIRTY(k); +		__entry->op = op; +		__entry->status = status; +	), + +	TP_printk("%u for %u at %llu(%u): %u:%llu len %u dirty %u", +		  __entry->status, __entry->op, +		  __entry->btree_node, __entry->btree_level, +		  __entry->inode, __entry->offset, +		  __entry->size, __entry->dirty) +); + +DECLARE_EVENT_CLASS(btree_split, +	TP_PROTO(struct btree *b, unsigned keys), +	TP_ARGS(b, keys), + +	TP_STRUCT__entry( +		__field(size_t,		bucket			) +		__field(unsigned,	keys			) +	), + +	TP_fast_assign( +		__entry->bucket	= PTR_BUCKET_NR(b->c, &b->key, 0); +		__entry->keys	= keys; +	), + +	TP_printk("bucket %zu keys %u", __entry->bucket, __entry->keys) +); + +DEFINE_EVENT(btree_split, bcache_btree_node_split, +	TP_PROTO(struct btree *b, unsigned keys), +	TP_ARGS(b, keys) +); + +DEFINE_EVENT(btree_split, bcache_btree_node_compact, +	TP_PROTO(struct btree *b, unsigned keys), +	TP_ARGS(b, keys) +); + +DEFINE_EVENT(btree_node, bcache_btree_set_root, +	TP_PROTO(struct btree *b), +	TP_ARGS(b) +); + +TRACE_EVENT(bcache_keyscan, +	TP_PROTO(unsigned nr_found, +		 unsigned start_inode, uint64_t start_offset, +		 unsigned end_inode, uint64_t end_offset), +	TP_ARGS(nr_found, +		start_inode, start_offset, +		end_inode, end_offset), + +	TP_STRUCT__entry( +		__field(__u32,	nr_found			) +		__field(__u32,	start_inode			) +		__field(__u64,	start_offset			) +		__field(__u32,	end_inode			) +		__field(__u64,	end_offset			) +	), + +	TP_fast_assign( +		__entry->nr_found	= nr_found; +		__entry->start_inode	= start_inode; +		__entry->start_offset	= start_offset; +		__entry->end_inode	= end_inode; +		__entry->end_offset	= end_offset; +	), + +	TP_printk("found %u keys from %u:%llu to %u:%llu", __entry->nr_found, +		  __entry->start_inode, __entry->start_offset, +		  __entry->end_inode, __entry->end_offset) +); + +/* Allocator */ + 
+TRACE_EVENT(bcache_invalidate, +	TP_PROTO(struct cache *ca, size_t bucket), +	TP_ARGS(ca, bucket), + +	TP_STRUCT__entry( +		__field(unsigned,	sectors			) +		__field(dev_t,		dev			) +		__field(__u64,		offset			) +	), + +	TP_fast_assign( +		__entry->dev		= ca->bdev->bd_dev; +		__entry->offset		= bucket << ca->set->bucket_bits; +		__entry->sectors	= GC_SECTORS_USED(&ca->buckets[bucket]); +	), + +	TP_printk("invalidated %u sectors at %d,%d sector=%llu", +		  __entry->sectors, MAJOR(__entry->dev), +		  MINOR(__entry->dev), __entry->offset) +); + +TRACE_EVENT(bcache_alloc, +	TP_PROTO(struct cache *ca, size_t bucket), +	TP_ARGS(ca, bucket), + +	TP_STRUCT__entry( +		__field(dev_t,		dev			) +		__field(__u64,		offset			) +	), + +	TP_fast_assign( +		__entry->dev		= ca->bdev->bd_dev; +		__entry->offset		= bucket << ca->set->bucket_bits; +	), + +	TP_printk("allocated %d,%d sector=%llu", MAJOR(__entry->dev), +		  MINOR(__entry->dev), __entry->offset) +); + +TRACE_EVENT(bcache_alloc_fail, +	TP_PROTO(struct cache *ca, unsigned reserve), +	TP_ARGS(ca, reserve), + +	TP_STRUCT__entry( +		__field(dev_t,		dev			) +		__field(unsigned,	free			) +		__field(unsigned,	free_inc		) +		__field(unsigned,	blocked			) +	), + +	TP_fast_assign( +		__entry->dev		= ca->bdev->bd_dev; +		__entry->free		= fifo_used(&ca->free[reserve]); +		__entry->free_inc	= fifo_used(&ca->free_inc); +		__entry->blocked	= atomic_read(&ca->set->prio_blocked); +	), + +	TP_printk("alloc fail %d,%d free %u free_inc %u blocked %u", +		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->free, +		  __entry->free_inc, __entry->blocked) +); + +/* Background writeback */ + +DEFINE_EVENT(bkey, bcache_writeback, +	TP_PROTO(struct bkey *k), +	TP_ARGS(k) +); + +DEFINE_EVENT(bkey, bcache_writeback_collision, +	TP_PROTO(struct bkey *k), +	TP_ARGS(k) +); + +#endif /* _TRACE_BCACHE_H */ + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/include/trace/events/bkl.h b/include/trace/events/bkl.h 
deleted file mode 100644 index 1af72dc2427..00000000000 --- a/include/trace/events/bkl.h +++ /dev/null @@ -1,61 +0,0 @@ -#undef TRACE_SYSTEM -#define TRACE_SYSTEM bkl - -#if !defined(_TRACE_BKL_H) || defined(TRACE_HEADER_MULTI_READ) -#define _TRACE_BKL_H - -#include <linux/tracepoint.h> - -TRACE_EVENT(lock_kernel, - -	TP_PROTO(const char *func, const char *file, int line), - -	TP_ARGS(func, file, line), - -	TP_STRUCT__entry( -		__field(	int,		depth			) -		__field_ext(	const char *,	func, FILTER_PTR_STRING	) -		__field_ext(	const char *,	file, FILTER_PTR_STRING	) -		__field(	int,		line			) -	), - -	TP_fast_assign( -		/* We want to record the lock_depth after lock is acquired */ -		__entry->depth = current->lock_depth + 1; -		__entry->func = func; -		__entry->file = file; -		__entry->line = line; -	), - -	TP_printk("depth=%d file:line=%s:%d func=%s()", __entry->depth, -		  __entry->file, __entry->line, __entry->func) -); - -TRACE_EVENT(unlock_kernel, - -	TP_PROTO(const char *func, const char *file, int line), - -	TP_ARGS(func, file, line), - -	TP_STRUCT__entry( -		__field(int,		depth		) -		__field(const char *,	func		) -		__field(const char *,	file		) -		__field(int,		line		) -	), - -	TP_fast_assign( -		__entry->depth = current->lock_depth; -		__entry->func = func; -		__entry->file = file; -		__entry->line = line; -	), - -	TP_printk("depth=%d file:line=%s:%d func=%s()", __entry->depth, -		  __entry->file, __entry->line, __entry->func) -); - -#endif /* _TRACE_BKL_H */ - -/* This part must be outside protection */ -#include <trace/define_trace.h> diff --git a/include/trace/events/block.h b/include/trace/events/block.h index d8ce278515c..e8a5eca1dbe 100644 --- a/include/trace/events/block.h +++ b/include/trace/events/block.h @@ -6,8 +6,61 @@  #include <linux/blktrace_api.h>  #include <linux/blkdev.h> +#include <linux/buffer_head.h>  #include <linux/tracepoint.h> +#define RWBS_LEN	8 + +DECLARE_EVENT_CLASS(block_buffer, + +	TP_PROTO(struct buffer_head *bh), + +	
TP_ARGS(bh), + +	TP_STRUCT__entry ( +		__field(  dev_t,	dev			) +		__field(  sector_t,	sector			) +		__field(  size_t,	size			) +	), + +	TP_fast_assign( +		__entry->dev		= bh->b_bdev->bd_dev; +		__entry->sector		= bh->b_blocknr; +		__entry->size		= bh->b_size; +	), + +	TP_printk("%d,%d sector=%llu size=%zu", +		MAJOR(__entry->dev), MINOR(__entry->dev), +		(unsigned long long)__entry->sector, __entry->size +	) +); + +/** + * block_touch_buffer - mark a buffer accessed + * @bh: buffer_head being touched + * + * Called from touch_buffer(). + */ +DEFINE_EVENT(block_buffer, block_touch_buffer, + +	TP_PROTO(struct buffer_head *bh), + +	TP_ARGS(bh) +); + +/** + * block_dirty_buffer - mark a buffer dirty + * @bh: buffer_head being dirtied + * + * Called from mark_buffer_dirty(). + */ +DEFINE_EVENT(block_buffer, block_dirty_buffer, + +	TP_PROTO(struct buffer_head *bh), + +	TP_ARGS(bh) +); +  DECLARE_EVENT_CLASS(block_rq_with_error,  	TP_PROTO(struct request_queue *q, struct request *rq), @@ -19,7 +72,7 @@ DECLARE_EVENT_CLASS(block_rq_with_error,  		__field(  sector_t,	sector			)  		__field(  unsigned int,	nr_sector		)  		__field(  int,		errors			) -		__array(  char,		rwbs,	6		) +		__array(  char,		rwbs,	RWBS_LEN	)  		__dynamic_array( char,	cmd,	blk_cmd_buf_len(rq)	)  	), @@ -31,7 +84,7 @@ DECLARE_EVENT_CLASS(block_rq_with_error,  					0 : blk_rq_sectors(rq);  		__entry->errors    = rq->errors; -		blk_fill_rwbs_rq(__entry->rwbs, rq); +		blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, blk_rq_bytes(rq));  		blk_dump_cmd(__get_str(cmd), rq);  	), @@ -79,6 +132,7 @@ DEFINE_EVENT(block_rq_with_error, block_rq_requeue,   * block_rq_complete - block IO operation completed by device driver   * @q: queue containing the block operation request   * @rq: block operations request + * @nr_bytes: number of completed bytes   *   * The block_rq_complete tracepoint event indicates that some portion   * of operation request has been completed by the device driver.  
If @@ -86,11 +140,37 @@ DEFINE_EVENT(block_rq_with_error, block_rq_requeue,   * do for the request. If @rq->bio is non-NULL then there is   * additional work required to complete the request.   */ -DEFINE_EVENT(block_rq_with_error, block_rq_complete, +TRACE_EVENT(block_rq_complete, -	TP_PROTO(struct request_queue *q, struct request *rq), +	TP_PROTO(struct request_queue *q, struct request *rq, +		 unsigned int nr_bytes), -	TP_ARGS(q, rq) +	TP_ARGS(q, rq, nr_bytes), + +	TP_STRUCT__entry( +		__field(  dev_t,	dev			) +		__field(  sector_t,	sector			) +		__field(  unsigned int,	nr_sector		) +		__field(  int,		errors			) +		__array(  char,		rwbs,	RWBS_LEN	) +		__dynamic_array( char,	cmd,	blk_cmd_buf_len(rq)	) +	), + +	TP_fast_assign( +		__entry->dev	   = rq->rq_disk ? disk_devt(rq->rq_disk) : 0; +		__entry->sector    = blk_rq_pos(rq); +		__entry->nr_sector = nr_bytes >> 9; +		__entry->errors    = rq->errors; + +		blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, nr_bytes); +		blk_dump_cmd(__get_str(cmd), rq); +	), + +	TP_printk("%d,%d %s (%s) %llu + %u [%d]", +		  MAJOR(__entry->dev), MINOR(__entry->dev), +		  __entry->rwbs, __get_str(cmd), +		  (unsigned long long)__entry->sector, +		  __entry->nr_sector, __entry->errors)  );  DECLARE_EVENT_CLASS(block_rq, @@ -104,7 +184,7 @@ DECLARE_EVENT_CLASS(block_rq,  		__field(  sector_t,	sector			)  		__field(  unsigned int,	nr_sector		)  		__field(  unsigned int,	bytes			) -		__array(  char,		rwbs,	6		) +		__array(  char,		rwbs,	RWBS_LEN	)  		__array(  char,         comm,   TASK_COMM_LEN   )  		__dynamic_array( char,	cmd,	blk_cmd_buf_len(rq)	)  	), @@ -118,7 +198,7 @@ DECLARE_EVENT_CLASS(block_rq,  		__entry->bytes     = (rq->cmd_type == REQ_TYPE_BLOCK_PC) ?  					
blk_rq_bytes(rq) : 0; -		blk_fill_rwbs_rq(__entry->rwbs, rq); +		blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, blk_rq_bytes(rq));  		blk_dump_cmd(__get_str(cmd), rq);  		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);  	), @@ -183,16 +263,16 @@ TRACE_EVENT(block_bio_bounce,  		__field( dev_t,		dev			)  		__field( sector_t,	sector			)  		__field( unsigned int,	nr_sector		) -		__array( char,		rwbs,	6		) +		__array( char,		rwbs,	RWBS_LEN	)  		__array( char,		comm,	TASK_COMM_LEN	)  	),  	TP_fast_assign(  		__entry->dev		= bio->bi_bdev ?  					  bio->bi_bdev->bd_dev : 0; -		__entry->sector		= bio->bi_sector; -		__entry->nr_sector	= bio->bi_size >> 9; -		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size); +		__entry->sector		= bio->bi_iter.bi_sector; +		__entry->nr_sector	= bio_sectors(bio); +		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);  		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);  	), @@ -206,29 +286,31 @@ TRACE_EVENT(block_bio_bounce,   * block_bio_complete - completed all work on the block operation   * @q: queue holding the block operation   * @bio: block operation completed + * @error: io error value   *   * This tracepoint indicates there is no further work to do on this   * block IO operation @bio.   
*/  TRACE_EVENT(block_bio_complete, -	TP_PROTO(struct request_queue *q, struct bio *bio), +	TP_PROTO(struct request_queue *q, struct bio *bio, int error), -	TP_ARGS(q, bio), +	TP_ARGS(q, bio, error),  	TP_STRUCT__entry(  		__field( dev_t,		dev		)  		__field( sector_t,	sector		)  		__field( unsigned,	nr_sector	)  		__field( int,		error		) -		__array( char,		rwbs,	6	) +		__array( char,		rwbs,	RWBS_LEN)  	),  	TP_fast_assign(  		__entry->dev		= bio->bi_bdev->bd_dev; -		__entry->sector		= bio->bi_sector; -		__entry->nr_sector	= bio->bi_size >> 9; -		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size); +		__entry->sector		= bio->bi_iter.bi_sector; +		__entry->nr_sector	= bio_sectors(bio); +		__entry->error		= error; +		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);  	),  	TP_printk("%d,%d %s %llu + %u [%d]", @@ -237,25 +319,25 @@ TRACE_EVENT(block_bio_complete,  		  __entry->nr_sector, __entry->error)  ); -DECLARE_EVENT_CLASS(block_bio, +DECLARE_EVENT_CLASS(block_bio_merge, -	TP_PROTO(struct request_queue *q, struct bio *bio), +	TP_PROTO(struct request_queue *q, struct request *rq, struct bio *bio), -	TP_ARGS(q, bio), +	TP_ARGS(q, rq, bio),  	TP_STRUCT__entry(  		__field( dev_t,		dev			)  		__field( sector_t,	sector			)  		__field( unsigned int,	nr_sector		) -		__array( char,		rwbs,	6		) +		__array( char,		rwbs,	RWBS_LEN	)  		__array( char,		comm,	TASK_COMM_LEN	)  	),  	TP_fast_assign(  		__entry->dev		= bio->bi_bdev->bd_dev; -		__entry->sector		= bio->bi_sector; -		__entry->nr_sector	= bio->bi_size >> 9; -		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size); +		__entry->sector		= bio->bi_iter.bi_sector; +		__entry->nr_sector	= bio_sectors(bio); +		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);  		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);  	), @@ -268,31 +350,33 @@ DECLARE_EVENT_CLASS(block_bio,  /**   * block_bio_backmerge - merging block operation to the end of an existing operation   * @q: queue holding operation + 
* @rq: request bio is being merged into   * @bio: new block operation to merge   *   * Merging block request @bio to the end of an existing block request   * in queue @q.   */ -DEFINE_EVENT(block_bio, block_bio_backmerge, +DEFINE_EVENT(block_bio_merge, block_bio_backmerge, -	TP_PROTO(struct request_queue *q, struct bio *bio), +	TP_PROTO(struct request_queue *q, struct request *rq, struct bio *bio), -	TP_ARGS(q, bio) +	TP_ARGS(q, rq, bio)  );  /**   * block_bio_frontmerge - merging block operation to the beginning of an existing operation   * @q: queue holding operation + * @rq: request bio is being merged into   * @bio: new block operation to merge   *   * Merging block IO operation @bio to the beginning of an existing block   * operation in queue @q.   */ -DEFINE_EVENT(block_bio, block_bio_frontmerge, +DEFINE_EVENT(block_bio_merge, block_bio_frontmerge, -	TP_PROTO(struct request_queue *q, struct bio *bio), +	TP_PROTO(struct request_queue *q, struct request *rq, struct bio *bio), -	TP_ARGS(q, bio) +	TP_ARGS(q, rq, bio)  );  /** @@ -302,11 +386,32 @@ DEFINE_EVENT(block_bio, block_bio_frontmerge,   *   * About to place the block IO operation @bio into queue @q.   
*/ -DEFINE_EVENT(block_bio, block_bio_queue, +TRACE_EVENT(block_bio_queue,  	TP_PROTO(struct request_queue *q, struct bio *bio), -	TP_ARGS(q, bio) +	TP_ARGS(q, bio), + +	TP_STRUCT__entry( +		__field( dev_t,		dev			) +		__field( sector_t,	sector			) +		__field( unsigned int,	nr_sector		) +		__array( char,		rwbs,	RWBS_LEN	) +		__array( char,		comm,	TASK_COMM_LEN	) +	), + +	TP_fast_assign( +		__entry->dev		= bio->bi_bdev->bd_dev; +		__entry->sector		= bio->bi_iter.bi_sector; +		__entry->nr_sector	= bio_sectors(bio); +		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size); +		memcpy(__entry->comm, current->comm, TASK_COMM_LEN); +	), + +	TP_printk("%d,%d %s %llu + %u [%s]", +		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs, +		  (unsigned long long)__entry->sector, +		  __entry->nr_sector, __entry->comm)  );  DECLARE_EVENT_CLASS(block_get_rq, @@ -319,14 +424,14 @@ DECLARE_EVENT_CLASS(block_get_rq,  		__field( dev_t,		dev			)  		__field( sector_t,	sector			)  		__field( unsigned int,	nr_sector		) -		__array( char,		rwbs,	6		) +		__array( char,		rwbs,	RWBS_LEN	)  		__array( char,		comm,	TASK_COMM_LEN	)          ),  	TP_fast_assign(  		__entry->dev		= bio ? bio->bi_bdev->bd_dev : 0; -		__entry->sector		= bio ? bio->bi_sector : 0; -		__entry->nr_sector	= bio ? bio->bi_size >> 9 : 0; +		__entry->sector		= bio ? bio->bi_iter.bi_sector : 0; +		__entry->nr_sector	= bio ? bio_sectors(bio) : 0;  		blk_fill_rwbs(__entry->rwbs,  			      bio ? 
bio->bi_rw : 0, __entry->nr_sector);  		memcpy(__entry->comm, current->comm, TASK_COMM_LEN); @@ -399,9 +504,9 @@ TRACE_EVENT(block_plug,  DECLARE_EVENT_CLASS(block_unplug, -	TP_PROTO(struct request_queue *q), +	TP_PROTO(struct request_queue *q, unsigned int depth, bool explicit), -	TP_ARGS(q), +	TP_ARGS(q, depth, explicit),  	TP_STRUCT__entry(  		__field( int,		nr_rq			) @@ -409,7 +514,7 @@ DECLARE_EVENT_CLASS(block_unplug,  	),  	TP_fast_assign( -		__entry->nr_rq	= q->rq.count[READ] + q->rq.count[WRITE]; +		__entry->nr_rq = depth;  		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);  	), @@ -417,31 +522,19 @@ DECLARE_EVENT_CLASS(block_unplug,  );  /** - * block_unplug_timer - timed release of operations requests in queue to device driver - * @q: request queue to unplug - * - * Unplug the request queue @q because a timer expired and allow block - * operation requests to be sent to the device driver. - */ -DEFINE_EVENT(block_unplug, block_unplug_timer, - -	TP_PROTO(struct request_queue *q), - -	TP_ARGS(q) -); - -/** - * block_unplug_io - release of operations requests in request queue + * block_unplug - release of operations requests in request queue   * @q: request queue to unplug + * @depth: number of requests just added to the queue + * @explicit: whether this was an explicit unplug, or one from schedule()   *   * Unplug request queue @q because device driver is scheduled to work   * on elements in the request queue.   
*/ -DEFINE_EVENT(block_unplug, block_unplug_io, +DEFINE_EVENT(block_unplug, block_unplug, -	TP_PROTO(struct request_queue *q), +	TP_PROTO(struct request_queue *q, unsigned int depth, bool explicit), -	TP_ARGS(q) +	TP_ARGS(q, depth, explicit)  );  /** @@ -466,15 +559,15 @@ TRACE_EVENT(block_split,  		__field( dev_t,		dev				)  		__field( sector_t,	sector				)  		__field( sector_t,	new_sector			) -		__array( char,		rwbs,		6		) +		__array( char,		rwbs,		RWBS_LEN	)  		__array( char,		comm,		TASK_COMM_LEN	)  	),  	TP_fast_assign(  		__entry->dev		= bio->bi_bdev->bd_dev; -		__entry->sector		= bio->bi_sector; +		__entry->sector		= bio->bi_iter.bi_sector;  		__entry->new_sector	= new_sector; -		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size); +		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);  		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);  	), @@ -486,16 +579,16 @@ TRACE_EVENT(block_split,  );  /** - * block_remap - map request for a partition to the raw device + * block_bio_remap - map request for a logical device to the raw device   * @q: queue holding the operation   * @bio: revised operation   * @dev: device for the operation   * @from: original sector for the operation   * - * An operation for a partition on a block device has been mapped to the + * An operation for a logical device has been mapped to the   * raw block device.   
*/ -TRACE_EVENT(block_remap, +TRACE_EVENT(block_bio_remap,  	TP_PROTO(struct request_queue *q, struct bio *bio, dev_t dev,  		 sector_t from), @@ -508,16 +601,16 @@ TRACE_EVENT(block_remap,  		__field( unsigned int,	nr_sector	)  		__field( dev_t,		old_dev		)  		__field( sector_t,	old_sector	) -		__array( char,		rwbs,	6	) +		__array( char,		rwbs,	RWBS_LEN)  	),  	TP_fast_assign(  		__entry->dev		= bio->bi_bdev->bd_dev; -		__entry->sector		= bio->bi_sector; -		__entry->nr_sector	= bio->bi_size >> 9; +		__entry->sector		= bio->bi_iter.bi_sector; +		__entry->nr_sector	= bio_sectors(bio);  		__entry->old_dev	= dev;  		__entry->old_sector	= from; -		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size); +		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);  	),  	TP_printk("%d,%d %s %llu + %u <- (%d,%d) %llu", @@ -552,7 +645,8 @@ TRACE_EVENT(block_rq_remap,  		__field( unsigned int,	nr_sector	)  		__field( dev_t,		old_dev		)  		__field( sector_t,	old_sector	) -		__array( char,		rwbs,	6	) +		__field( unsigned int,	nr_bios		) +		__array( char,		rwbs,	RWBS_LEN)  	),  	TP_fast_assign( @@ -561,15 +655,16 @@ TRACE_EVENT(block_rq_remap,  		__entry->nr_sector	= blk_rq_sectors(rq);  		__entry->old_dev	= dev;  		__entry->old_sector	= from; -		blk_fill_rwbs_rq(__entry->rwbs, rq); +		__entry->nr_bios	= blk_rq_count_bios(rq); +		blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, blk_rq_bytes(rq));  	), -	TP_printk("%d,%d %s %llu + %u <- (%d,%d) %llu", +	TP_printk("%d,%d %s %llu + %u <- (%d,%d) %llu %u",  		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,  		  (unsigned long long)__entry->sector,  		  __entry->nr_sector,  		  MAJOR(__entry->old_dev), MINOR(__entry->old_dev), -		  (unsigned long long)__entry->old_sector) +		  (unsigned long long)__entry->old_sector, __entry->nr_bios)  );  #endif /* _TRACE_BLOCK_H */ diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h new file mode 100644 index 00000000000..4ee4e30d26d --- /dev/null +++ 
b/include/trace/events/btrfs.h @@ -0,0 +1,1125 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM btrfs + +#if !defined(_TRACE_BTRFS_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_BTRFS_H + +#include <linux/writeback.h> +#include <linux/tracepoint.h> +#include <trace/events/gfpflags.h> + +struct btrfs_root; +struct btrfs_fs_info; +struct btrfs_inode; +struct extent_map; +struct btrfs_ordered_extent; +struct btrfs_delayed_ref_node; +struct btrfs_delayed_tree_ref; +struct btrfs_delayed_data_ref; +struct btrfs_delayed_ref_head; +struct btrfs_block_group_cache; +struct btrfs_free_cluster; +struct map_lookup; +struct extent_buffer; +struct btrfs_work; +struct __btrfs_workqueue; + +#define show_ref_type(type)						\ +	__print_symbolic(type,						\ +		{ BTRFS_TREE_BLOCK_REF_KEY, 	"TREE_BLOCK_REF" },	\ +		{ BTRFS_EXTENT_DATA_REF_KEY, 	"EXTENT_DATA_REF" },	\ +		{ BTRFS_EXTENT_REF_V0_KEY, 	"EXTENT_REF_V0" },	\ +		{ BTRFS_SHARED_BLOCK_REF_KEY, 	"SHARED_BLOCK_REF" },	\ +		{ BTRFS_SHARED_DATA_REF_KEY, 	"SHARED_DATA_REF" }) + +#define __show_root_type(obj)						\ +	__print_symbolic_u64(obj,					\ +		{ BTRFS_ROOT_TREE_OBJECTID, 	"ROOT_TREE"	},	\ +		{ BTRFS_EXTENT_TREE_OBJECTID, 	"EXTENT_TREE"	},	\ +		{ BTRFS_CHUNK_TREE_OBJECTID, 	"CHUNK_TREE"	},	\ +		{ BTRFS_DEV_TREE_OBJECTID, 	"DEV_TREE"	},	\ +		{ BTRFS_FS_TREE_OBJECTID, 	"FS_TREE"	},	\ +		{ BTRFS_ROOT_TREE_DIR_OBJECTID, "ROOT_TREE_DIR"	},	\ +		{ BTRFS_CSUM_TREE_OBJECTID, 	"CSUM_TREE"	},	\ +		{ BTRFS_TREE_LOG_OBJECTID,	"TREE_LOG"	},	\ +		{ BTRFS_QUOTA_TREE_OBJECTID,	"QUOTA_TREE"	},	\ +		{ BTRFS_TREE_RELOC_OBJECTID,	"TREE_RELOC"	},	\ +		{ BTRFS_UUID_TREE_OBJECTID,	"UUID_RELOC"	},	\ +		{ BTRFS_DATA_RELOC_TREE_OBJECTID, "DATA_RELOC_TREE" }) + +#define show_root_type(obj)						\ +	obj, ((obj >= BTRFS_DATA_RELOC_TREE_OBJECTID) ||		\ +	      (obj >= BTRFS_ROOT_TREE_OBJECTID &&			\ +	       obj <= BTRFS_QUOTA_TREE_OBJECTID)) ? 
__show_root_type(obj) : "-" + +#define BTRFS_GROUP_FLAGS	\ +	{ BTRFS_BLOCK_GROUP_DATA,	"DATA"},	\ +	{ BTRFS_BLOCK_GROUP_SYSTEM,	"SYSTEM"},	\ +	{ BTRFS_BLOCK_GROUP_METADATA,	"METADATA"},	\ +	{ BTRFS_BLOCK_GROUP_RAID0,	"RAID0"}, 	\ +	{ BTRFS_BLOCK_GROUP_RAID1,	"RAID1"}, 	\ +	{ BTRFS_BLOCK_GROUP_DUP,	"DUP"}, 	\ +	{ BTRFS_BLOCK_GROUP_RAID10,	"RAID10"}, 	\ +	{ BTRFS_BLOCK_GROUP_RAID5,	"RAID5"},	\ +	{ BTRFS_BLOCK_GROUP_RAID6,	"RAID6"} + +#define BTRFS_UUID_SIZE 16 + +TRACE_EVENT(btrfs_transaction_commit, + +	TP_PROTO(struct btrfs_root *root), + +	TP_ARGS(root), + +	TP_STRUCT__entry( +		__field(	u64,  generation		) +		__field(	u64,  root_objectid		) +	), + +	TP_fast_assign( +		__entry->generation	= root->fs_info->generation; +		__entry->root_objectid	= root->root_key.objectid; +	), + +	TP_printk("root = %llu(%s), gen = %llu", +		  show_root_type(__entry->root_objectid), +		  (unsigned long long)__entry->generation) +); + +DECLARE_EVENT_CLASS(btrfs__inode, + +	TP_PROTO(struct inode *inode), + +	TP_ARGS(inode), + +	TP_STRUCT__entry( +		__field(	ino_t,  ino			) +		__field(	blkcnt_t,  blocks		) +		__field(	u64,  disk_i_size		) +		__field(	u64,  generation		) +		__field(	u64,  last_trans		) +		__field(	u64,  logged_trans		) +		__field(	u64,  root_objectid		) +	), + +	TP_fast_assign( +		__entry->ino	= inode->i_ino; +		__entry->blocks	= inode->i_blocks; +		__entry->disk_i_size  = BTRFS_I(inode)->disk_i_size; +		__entry->generation = BTRFS_I(inode)->generation; +		__entry->last_trans = BTRFS_I(inode)->last_trans; +		__entry->logged_trans = BTRFS_I(inode)->logged_trans; +		__entry->root_objectid = +				BTRFS_I(inode)->root->root_key.objectid; +	), + +	TP_printk("root = %llu(%s), gen = %llu, ino = %lu, blocks = %llu, " +		  "disk_i_size = %llu, last_trans = %llu, logged_trans = %llu", +		  show_root_type(__entry->root_objectid), +		  (unsigned long long)__entry->generation, +		  (unsigned long)__entry->ino, +		  (unsigned long long)__entry->blocks, +		  (unsigned long 
long)__entry->disk_i_size, +		  (unsigned long long)__entry->last_trans, +		  (unsigned long long)__entry->logged_trans) +); + +DEFINE_EVENT(btrfs__inode, btrfs_inode_new, + +	TP_PROTO(struct inode *inode), + +	TP_ARGS(inode) +); + +DEFINE_EVENT(btrfs__inode, btrfs_inode_request, + +	TP_PROTO(struct inode *inode), + +	TP_ARGS(inode) +); + +DEFINE_EVENT(btrfs__inode, btrfs_inode_evict, + +	TP_PROTO(struct inode *inode), + +	TP_ARGS(inode) +); + +#define __show_map_type(type)						\ +	__print_symbolic_u64(type,					\ +		{ EXTENT_MAP_LAST_BYTE, "LAST_BYTE" 	},		\ +		{ EXTENT_MAP_HOLE, 	"HOLE" 		},		\ +		{ EXTENT_MAP_INLINE, 	"INLINE" 	},		\ +		{ EXTENT_MAP_DELALLOC,	"DELALLOC" 	}) + +#define show_map_type(type)			\ +	type, (type >= EXTENT_MAP_LAST_BYTE) ? "-" :  __show_map_type(type) + +#define show_map_flags(flag)						\ +	__print_flags(flag, "|",					\ +		{ EXTENT_FLAG_PINNED, 		"PINNED" 	},	\ +		{ EXTENT_FLAG_COMPRESSED, 	"COMPRESSED" 	},	\ +		{ EXTENT_FLAG_VACANCY, 		"VACANCY" 	},	\ +		{ EXTENT_FLAG_PREALLOC, 	"PREALLOC" 	},	\ +		{ EXTENT_FLAG_LOGGING,	 	"LOGGING" 	},	\ +		{ EXTENT_FLAG_FILLING,	 	"FILLING" 	}) + +TRACE_EVENT_CONDITION(btrfs_get_extent, + +	TP_PROTO(struct btrfs_root *root, struct extent_map *map), + +	TP_ARGS(root, map), + +	TP_CONDITION(map), + +	TP_STRUCT__entry( +		__field(	u64,  root_objectid	) +		__field(	u64,  start		) +		__field(	u64,  len		) +		__field(	u64,  orig_start	) +		__field(	u64,  block_start	) +		__field(	u64,  block_len		) +		__field(	unsigned long,  flags	) +		__field(	int,  refs		) +		__field(	unsigned int,  compress_type	) +	), + +	TP_fast_assign( +		__entry->root_objectid	= root->root_key.objectid; +		__entry->start 		= map->start; +		__entry->len		= map->len; +		__entry->orig_start	= map->orig_start; +		__entry->block_start	= map->block_start; +		__entry->block_len	= map->block_len; +		__entry->flags		= map->flags; +		__entry->refs		= atomic_read(&map->refs); +		__entry->compress_type	= map->compress_type; +	), + +	
TP_printk("root = %llu(%s), start = %llu, len = %llu, " +		  "orig_start = %llu, block_start = %llu(%s), " +		  "block_len = %llu, flags = %s, refs = %u, " +		  "compress_type = %u", +		  show_root_type(__entry->root_objectid), +		  (unsigned long long)__entry->start, +		  (unsigned long long)__entry->len, +		  (unsigned long long)__entry->orig_start, +		  show_map_type(__entry->block_start), +		  (unsigned long long)__entry->block_len, +		  show_map_flags(__entry->flags), +		  __entry->refs, __entry->compress_type) +); + +#define show_ordered_flags(flags)					   \ +	__print_flags(flags, "|",					   \ +		{ (1 << BTRFS_ORDERED_IO_DONE), 	"IO_DONE" 	}, \ +		{ (1 << BTRFS_ORDERED_COMPLETE), 	"COMPLETE" 	}, \ +		{ (1 << BTRFS_ORDERED_NOCOW), 		"NOCOW" 	}, \ +		{ (1 << BTRFS_ORDERED_COMPRESSED), 	"COMPRESSED" 	}, \ +		{ (1 << BTRFS_ORDERED_PREALLOC), 	"PREALLOC" 	}, \ +		{ (1 << BTRFS_ORDERED_DIRECT),	 	"DIRECT" 	}, \ +		{ (1 << BTRFS_ORDERED_IOERR), 		"IOERR" 	}, \ +		{ (1 << BTRFS_ORDERED_UPDATED_ISIZE), 	"UPDATED_ISIZE"	}, \ +		{ (1 << BTRFS_ORDERED_LOGGED_CSUM), 	"LOGGED_CSUM"	}, \ +		{ (1 << BTRFS_ORDERED_TRUNCATED), 	"TRUNCATED"	}) + + +DECLARE_EVENT_CLASS(btrfs__ordered_extent, + +	TP_PROTO(struct inode *inode, struct btrfs_ordered_extent *ordered), + +	TP_ARGS(inode, ordered), + +	TP_STRUCT__entry( +		__field(	ino_t,  ino		) +		__field(	u64,  file_offset	) +		__field(	u64,  start		) +		__field(	u64,  len		) +		__field(	u64,  disk_len		) +		__field(	u64,  bytes_left	) +		__field(	unsigned long,  flags	) +		__field(	int,  compress_type	) +		__field(	int,  refs		) +		__field(	u64,  root_objectid	) +	), + +	TP_fast_assign( +		__entry->ino 		= inode->i_ino; +		__entry->file_offset	= ordered->file_offset; +		__entry->start		= ordered->start; +		__entry->len		= ordered->len; +		__entry->disk_len	= ordered->disk_len; +		__entry->bytes_left	= ordered->bytes_left; +		__entry->flags		= ordered->flags; +		__entry->compress_type	= ordered->compress_type; +		__entry->refs		= 
atomic_read(&ordered->refs); +		__entry->root_objectid	= +				BTRFS_I(inode)->root->root_key.objectid; +	), + +	TP_printk("root = %llu(%s), ino = %llu, file_offset = %llu, " +		  "start = %llu, len = %llu, disk_len = %llu, " +		  "bytes_left = %llu, flags = %s, compress_type = %d, " +		  "refs = %d", +		  show_root_type(__entry->root_objectid), +		  (unsigned long long)__entry->ino, +		  (unsigned long long)__entry->file_offset, +		  (unsigned long long)__entry->start, +		  (unsigned long long)__entry->len, +		  (unsigned long long)__entry->disk_len, +		  (unsigned long long)__entry->bytes_left, +		  show_ordered_flags(__entry->flags), +		  __entry->compress_type, __entry->refs) +); + +DEFINE_EVENT(btrfs__ordered_extent, btrfs_ordered_extent_add, + +	TP_PROTO(struct inode *inode, struct btrfs_ordered_extent *ordered), + +	TP_ARGS(inode, ordered) +); + +DEFINE_EVENT(btrfs__ordered_extent, btrfs_ordered_extent_remove, + +	TP_PROTO(struct inode *inode, struct btrfs_ordered_extent *ordered), + +	TP_ARGS(inode, ordered) +); + +DEFINE_EVENT(btrfs__ordered_extent, btrfs_ordered_extent_start, + +	TP_PROTO(struct inode *inode, struct btrfs_ordered_extent *ordered), + +	TP_ARGS(inode, ordered) +); + +DEFINE_EVENT(btrfs__ordered_extent, btrfs_ordered_extent_put, + +	TP_PROTO(struct inode *inode, struct btrfs_ordered_extent *ordered), + +	TP_ARGS(inode, ordered) +); + +DECLARE_EVENT_CLASS(btrfs__writepage, + +	TP_PROTO(struct page *page, struct inode *inode, +		 struct writeback_control *wbc), + +	TP_ARGS(page, inode, wbc), + +	TP_STRUCT__entry( +		__field(	ino_t,  ino			) +		__field(	pgoff_t,  index			) +		__field(	long,   nr_to_write		) +		__field(	long,   pages_skipped		) +		__field(	loff_t, range_start		) +		__field(	loff_t, range_end		) +		__field(	char,   for_kupdate		) +		__field(	char,   for_reclaim		) +		__field(	char,   range_cyclic		) +		__field(	pgoff_t,  writeback_index	) +		__field(	u64,    root_objectid		) +	), + +	TP_fast_assign( +		__entry->ino		= 
inode->i_ino; +		__entry->index		= page->index; +		__entry->nr_to_write	= wbc->nr_to_write; +		__entry->pages_skipped	= wbc->pages_skipped; +		__entry->range_start	= wbc->range_start; +		__entry->range_end	= wbc->range_end; +		__entry->for_kupdate	= wbc->for_kupdate; +		__entry->for_reclaim	= wbc->for_reclaim; +		__entry->range_cyclic	= wbc->range_cyclic; +		__entry->writeback_index = inode->i_mapping->writeback_index; +		__entry->root_objectid	= +				 BTRFS_I(inode)->root->root_key.objectid; +	), + +	TP_printk("root = %llu(%s), ino = %lu, page_index = %lu, " +		  "nr_to_write = %ld, pages_skipped = %ld, range_start = %llu, " +		  "range_end = %llu, for_kupdate = %d, " +		  "for_reclaim = %d, range_cyclic = %d, writeback_index = %lu", +		  show_root_type(__entry->root_objectid), +		  (unsigned long)__entry->ino, __entry->index, +		  __entry->nr_to_write, __entry->pages_skipped, +		  __entry->range_start, __entry->range_end, +		  __entry->for_kupdate, +		  __entry->for_reclaim, __entry->range_cyclic, +		  (unsigned long)__entry->writeback_index) +); + +DEFINE_EVENT(btrfs__writepage, __extent_writepage, + +	TP_PROTO(struct page *page, struct inode *inode, +		 struct writeback_control *wbc), + +	TP_ARGS(page, inode, wbc) +); + +TRACE_EVENT(btrfs_writepage_end_io_hook, + +	TP_PROTO(struct page *page, u64 start, u64 end, int uptodate), + +	TP_ARGS(page, start, end, uptodate), + +	TP_STRUCT__entry( +		__field(	ino_t,	 ino		) +		__field(	pgoff_t, index		) +		__field(	u64,	 start		) +		__field(	u64,	 end		) +		__field(	int,	 uptodate	) +		__field(	u64,    root_objectid	) +	), + +	TP_fast_assign( +		__entry->ino	= page->mapping->host->i_ino; +		__entry->index	= page->index; +		__entry->start	= start; +		__entry->end	= end; +		__entry->uptodate = uptodate; +		__entry->root_objectid	= +			 BTRFS_I(page->mapping->host)->root->root_key.objectid; +	), + +	TP_printk("root = %llu(%s), ino = %lu, page_index = %lu, start = %llu, " +		  "end = %llu, uptodate = %d", +		  
show_root_type(__entry->root_objectid), +		  (unsigned long)__entry->ino, (unsigned long)__entry->index, +		  (unsigned long long)__entry->start, +		  (unsigned long long)__entry->end, __entry->uptodate) +); + +TRACE_EVENT(btrfs_sync_file, + +	TP_PROTO(struct file *file, int datasync), + +	TP_ARGS(file, datasync), + +	TP_STRUCT__entry( +		__field(	ino_t,  ino		) +		__field(	ino_t,  parent		) +		__field(	int,    datasync	) +		__field(	u64,    root_objectid	) +	), + +	TP_fast_assign( +		struct dentry *dentry = file->f_path.dentry; +		struct inode *inode = dentry->d_inode; + +		__entry->ino		= inode->i_ino; +		__entry->parent		= dentry->d_parent->d_inode->i_ino; +		__entry->datasync	= datasync; +		__entry->root_objectid	= +				 BTRFS_I(inode)->root->root_key.objectid; +	), + +	TP_printk("root = %llu(%s), ino = %ld, parent = %ld, datasync = %d", +		  show_root_type(__entry->root_objectid), +		  (unsigned long)__entry->ino, (unsigned long)__entry->parent, +		  __entry->datasync) +); + +TRACE_EVENT(btrfs_sync_fs, + +	TP_PROTO(int wait), + +	TP_ARGS(wait), + +	TP_STRUCT__entry( +		__field(	int,  wait		) +	), + +	TP_fast_assign( +		__entry->wait	= wait; +	), + +	TP_printk("wait = %d", __entry->wait) +); + +#define show_ref_action(action)						\ +	__print_symbolic(action,					\ +		{ BTRFS_ADD_DELAYED_REF,    "ADD_DELAYED_REF" },	\ +		{ BTRFS_DROP_DELAYED_REF,   "DROP_DELAYED_REF" },	\ +		{ BTRFS_ADD_DELAYED_EXTENT, "ADD_DELAYED_EXTENT" }, 	\ +		{ BTRFS_UPDATE_DELAYED_HEAD, "UPDATE_DELAYED_HEAD" }) +			 + +DECLARE_EVENT_CLASS(btrfs_delayed_tree_ref, + +	TP_PROTO(struct btrfs_delayed_ref_node *ref, +		 struct btrfs_delayed_tree_ref *full_ref, +		 int action), + +	TP_ARGS(ref, full_ref, action), + +	TP_STRUCT__entry( +		__field(	u64,  bytenr		) +		__field(	u64,  num_bytes		) +		__field(	int,  action		)  +		__field(	u64,  parent		) +		__field(	u64,  ref_root		) +		__field(	int,  level		) +		__field(	int,  type		) +		__field(	u64,  seq		) +	), + +	TP_fast_assign( +		
__entry->bytenr		= ref->bytenr; +		__entry->num_bytes	= ref->num_bytes; +		__entry->action		= action; +		__entry->parent		= full_ref->parent; +		__entry->ref_root	= full_ref->root; +		__entry->level		= full_ref->level; +		__entry->type		= ref->type; +		__entry->seq		= ref->seq; +	), + +	TP_printk("bytenr = %llu, num_bytes = %llu, action = %s, " +		  "parent = %llu(%s), ref_root = %llu(%s), level = %d, " +		  "type = %s, seq = %llu", +		  (unsigned long long)__entry->bytenr, +		  (unsigned long long)__entry->num_bytes, +		  show_ref_action(__entry->action), +		  show_root_type(__entry->parent), +		  show_root_type(__entry->ref_root), +		  __entry->level, show_ref_type(__entry->type), +		  (unsigned long long)__entry->seq) +); + +DEFINE_EVENT(btrfs_delayed_tree_ref,  add_delayed_tree_ref, + +	TP_PROTO(struct btrfs_delayed_ref_node *ref, +		 struct btrfs_delayed_tree_ref *full_ref, +		 int action), + +	TP_ARGS(ref, full_ref, action) +); + +DEFINE_EVENT(btrfs_delayed_tree_ref,  run_delayed_tree_ref, + +	TP_PROTO(struct btrfs_delayed_ref_node *ref, +		 struct btrfs_delayed_tree_ref *full_ref, +		 int action), + +	TP_ARGS(ref, full_ref, action) +); + +DECLARE_EVENT_CLASS(btrfs_delayed_data_ref, + +	TP_PROTO(struct btrfs_delayed_ref_node *ref, +		 struct btrfs_delayed_data_ref *full_ref, +		 int action), + +	TP_ARGS(ref, full_ref, action), + +	TP_STRUCT__entry( +		__field(	u64,  bytenr		) +		__field(	u64,  num_bytes		) +		__field(	int,  action		)  +		__field(	u64,  parent		) +		__field(	u64,  ref_root		) +		__field(	u64,  owner		) +		__field(	u64,  offset		) +		__field(	int,  type		) +		__field(	u64,  seq		) +	), + +	TP_fast_assign( +		__entry->bytenr		= ref->bytenr; +		__entry->num_bytes	= ref->num_bytes; +		__entry->action		= action; +		__entry->parent		= full_ref->parent; +		__entry->ref_root	= full_ref->root; +		__entry->owner		= full_ref->objectid; +		__entry->offset		= full_ref->offset; +		__entry->type		= ref->type; +		__entry->seq		= ref->seq; +	), + +	
TP_printk("bytenr = %llu, num_bytes = %llu, action = %s, " +		  "parent = %llu(%s), ref_root = %llu(%s), owner = %llu, " +		  "offset = %llu, type = %s, seq = %llu", +		  (unsigned long long)__entry->bytenr, +		  (unsigned long long)__entry->num_bytes, +		  show_ref_action(__entry->action), +		  show_root_type(__entry->parent), +		  show_root_type(__entry->ref_root), +		  (unsigned long long)__entry->owner, +		  (unsigned long long)__entry->offset, +		  show_ref_type(__entry->type), +		  (unsigned long long)__entry->seq) +); + +DEFINE_EVENT(btrfs_delayed_data_ref,  add_delayed_data_ref, + +	TP_PROTO(struct btrfs_delayed_ref_node *ref, +		 struct btrfs_delayed_data_ref *full_ref, +		 int action), + +	TP_ARGS(ref, full_ref, action) +); + +DEFINE_EVENT(btrfs_delayed_data_ref,  run_delayed_data_ref, + +	TP_PROTO(struct btrfs_delayed_ref_node *ref, +		 struct btrfs_delayed_data_ref *full_ref, +		 int action), + +	TP_ARGS(ref, full_ref, action) +); + +DECLARE_EVENT_CLASS(btrfs_delayed_ref_head, + +	TP_PROTO(struct btrfs_delayed_ref_node *ref, +		 struct btrfs_delayed_ref_head *head_ref, +		 int action), + +	TP_ARGS(ref, head_ref, action), + +	TP_STRUCT__entry( +		__field(	u64,  bytenr		) +		__field(	u64,  num_bytes		) +		__field(	int,  action		)  +		__field(	int,  is_data		) +	), + +	TP_fast_assign( +		__entry->bytenr		= ref->bytenr; +		__entry->num_bytes	= ref->num_bytes; +		__entry->action		= action; +		__entry->is_data	= head_ref->is_data; +	), + +	TP_printk("bytenr = %llu, num_bytes = %llu, action = %s, is_data = %d", +		  (unsigned long long)__entry->bytenr, +		  (unsigned long long)__entry->num_bytes, +		  show_ref_action(__entry->action), +		  __entry->is_data) +); + +DEFINE_EVENT(btrfs_delayed_ref_head,  add_delayed_ref_head, + +	TP_PROTO(struct btrfs_delayed_ref_node *ref, +		 struct btrfs_delayed_ref_head *head_ref, +		 int action), + +	TP_ARGS(ref, head_ref, action) +); + +DEFINE_EVENT(btrfs_delayed_ref_head,  run_delayed_ref_head, + +	TP_PROTO(struct 
btrfs_delayed_ref_node *ref, +		 struct btrfs_delayed_ref_head *head_ref, +		 int action), + +	TP_ARGS(ref, head_ref, action) +); + +#define show_chunk_type(type)					\ +	__print_flags(type, "|",				\ +		{ BTRFS_BLOCK_GROUP_DATA, 	"DATA"	},	\ +		{ BTRFS_BLOCK_GROUP_SYSTEM, 	"SYSTEM"},	\ +		{ BTRFS_BLOCK_GROUP_METADATA, 	"METADATA"},	\ +		{ BTRFS_BLOCK_GROUP_RAID0, 	"RAID0" },	\ +		{ BTRFS_BLOCK_GROUP_RAID1, 	"RAID1" },	\ +		{ BTRFS_BLOCK_GROUP_DUP, 	"DUP"	},	\ +		{ BTRFS_BLOCK_GROUP_RAID10, 	"RAID10"},	\ +		{ BTRFS_BLOCK_GROUP_RAID5, 	"RAID5"	},	\ +		{ BTRFS_BLOCK_GROUP_RAID6, 	"RAID6"	}) + +DECLARE_EVENT_CLASS(btrfs__chunk, + +	TP_PROTO(struct btrfs_root *root, struct map_lookup *map, +		 u64 offset, u64 size), + +	TP_ARGS(root, map, offset, size), + +	TP_STRUCT__entry( +		__field(	int,  num_stripes		) +		__field(	u64,  type			) +		__field(	int,  sub_stripes		) +		__field(	u64,  offset			) +		__field(	u64,  size			) +		__field(	u64,  root_objectid		) +	), + +	TP_fast_assign( +		__entry->num_stripes	= map->num_stripes; +		__entry->type		= map->type; +		__entry->sub_stripes	= map->sub_stripes; +		__entry->offset		= offset; +		__entry->size		= size; +		__entry->root_objectid	= root->root_key.objectid; +	), + +	TP_printk("root = %llu(%s), offset = %llu, size = %llu, " +		  "num_stripes = %d, sub_stripes = %d, type = %s", +		  show_root_type(__entry->root_objectid), +		  (unsigned long long)__entry->offset, +		  (unsigned long long)__entry->size, +		  __entry->num_stripes, __entry->sub_stripes, +		  show_chunk_type(__entry->type)) +); + +DEFINE_EVENT(btrfs__chunk,  btrfs_chunk_alloc, + +	TP_PROTO(struct btrfs_root *root, struct map_lookup *map, +		 u64 offset, u64 size), + +	TP_ARGS(root, map, offset, size) +); + +DEFINE_EVENT(btrfs__chunk,  btrfs_chunk_free, + +	TP_PROTO(struct btrfs_root *root, struct map_lookup *map, +		 u64 offset, u64 size), + +	TP_ARGS(root, map, offset, size) +); + +TRACE_EVENT(btrfs_cow_block, + +	TP_PROTO(struct btrfs_root *root, struct 
extent_buffer *buf, +		 struct extent_buffer *cow), + +	TP_ARGS(root, buf, cow), + +	TP_STRUCT__entry( +		__field(	u64,  root_objectid		) +		__field(	u64,  buf_start			) +		__field(	int,  refs			) +		__field(	u64,  cow_start			) +		__field(	int,  buf_level			) +		__field(	int,  cow_level			) +	), + +	TP_fast_assign( +		__entry->root_objectid	= root->root_key.objectid; +		__entry->buf_start	= buf->start; +		__entry->refs		= atomic_read(&buf->refs); +		__entry->cow_start	= cow->start; +		__entry->buf_level	= btrfs_header_level(buf); +		__entry->cow_level	= btrfs_header_level(cow); +	), + +	TP_printk("root = %llu(%s), refs = %d, orig_buf = %llu " +		  "(orig_level = %d), cow_buf = %llu (cow_level = %d)", +		  show_root_type(__entry->root_objectid), +		  __entry->refs, +		  (unsigned long long)__entry->buf_start, +		  __entry->buf_level, +		  (unsigned long long)__entry->cow_start, +		  __entry->cow_level) +); + +TRACE_EVENT(btrfs_space_reservation, + +	TP_PROTO(struct btrfs_fs_info *fs_info, char *type, u64 val, +		 u64 bytes, int reserve), + +	TP_ARGS(fs_info, type, val, bytes, reserve), + +	TP_STRUCT__entry( +		__array(	u8,	fsid,	BTRFS_UUID_SIZE	) +		__string(	type,	type			) +		__field(	u64,	val			) +		__field(	u64,	bytes			) +		__field(	int,	reserve			) +	), + +	TP_fast_assign( +		memcpy(__entry->fsid, fs_info->fsid, BTRFS_UUID_SIZE); +		__assign_str(type, type); +		__entry->val		= val; +		__entry->bytes		= bytes; +		__entry->reserve	= reserve; +	), + +	TP_printk("%pU: %s: %Lu %s %Lu", __entry->fsid, __get_str(type), +		  __entry->val, __entry->reserve ? 
"reserve" : "release", +		  __entry->bytes) +); + +DECLARE_EVENT_CLASS(btrfs__reserved_extent, + +	TP_PROTO(struct btrfs_root *root, u64 start, u64 len), + +	TP_ARGS(root, start, len), + +	TP_STRUCT__entry( +		__field(	u64,  root_objectid		) +		__field(	u64,  start			) +		__field(	u64,  len			) +	), + +	TP_fast_assign( +		__entry->root_objectid	= root->root_key.objectid; +		__entry->start		= start; +		__entry->len		= len; +	), + +	TP_printk("root = %llu(%s), start = %llu, len = %llu", +		  show_root_type(__entry->root_objectid), +		  (unsigned long long)__entry->start, +		  (unsigned long long)__entry->len) +); + +DEFINE_EVENT(btrfs__reserved_extent,  btrfs_reserved_extent_alloc, + +	TP_PROTO(struct btrfs_root *root, u64 start, u64 len), + +	TP_ARGS(root, start, len) +); + +DEFINE_EVENT(btrfs__reserved_extent,  btrfs_reserved_extent_free, + +	TP_PROTO(struct btrfs_root *root, u64 start, u64 len), + +	TP_ARGS(root, start, len) +); + +TRACE_EVENT(find_free_extent, + +	TP_PROTO(struct btrfs_root *root, u64 num_bytes, u64 empty_size, +		 u64 data), + +	TP_ARGS(root, num_bytes, empty_size, data), + +	TP_STRUCT__entry( +		__field(	u64,	root_objectid		) +		__field(	u64,	num_bytes		) +		__field(	u64,	empty_size		) +		__field(	u64,	data			) +	), + +	TP_fast_assign( +		__entry->root_objectid	= root->root_key.objectid; +		__entry->num_bytes	= num_bytes; +		__entry->empty_size	= empty_size; +		__entry->data		= data; +	), + +	TP_printk("root = %Lu(%s), len = %Lu, empty_size = %Lu, " +		  "flags = %Lu(%s)", show_root_type(__entry->root_objectid), +		  __entry->num_bytes, __entry->empty_size, __entry->data, +		  __print_flags((unsigned long)__entry->data, "|", +				 BTRFS_GROUP_FLAGS)) +); + +DECLARE_EVENT_CLASS(btrfs__reserve_extent, + +	TP_PROTO(struct btrfs_root *root, +		 struct btrfs_block_group_cache *block_group, u64 start, +		 u64 len), + +	TP_ARGS(root, block_group, start, len), + +	TP_STRUCT__entry( +		__field(	u64,	root_objectid		) +		__field(	u64,	bg_objectid		) +		
__field(	u64,	flags			) +		__field(	u64,	start			) +		__field(	u64,	len			) +	), + +	TP_fast_assign( +		__entry->root_objectid	= root->root_key.objectid; +		__entry->bg_objectid	= block_group->key.objectid; +		__entry->flags		= block_group->flags; +		__entry->start		= start; +		__entry->len		= len; +	), + +	TP_printk("root = %Lu(%s), block_group = %Lu, flags = %Lu(%s), " +		  "start = %Lu, len = %Lu", +		  show_root_type(__entry->root_objectid), __entry->bg_objectid, +		  __entry->flags, __print_flags((unsigned long)__entry->flags, +						"|", BTRFS_GROUP_FLAGS), +		  __entry->start, __entry->len) +); + +DEFINE_EVENT(btrfs__reserve_extent, btrfs_reserve_extent, + +	TP_PROTO(struct btrfs_root *root, +		 struct btrfs_block_group_cache *block_group, u64 start, +		 u64 len), + +	TP_ARGS(root, block_group, start, len) +); + +DEFINE_EVENT(btrfs__reserve_extent, btrfs_reserve_extent_cluster, + +	TP_PROTO(struct btrfs_root *root, +		 struct btrfs_block_group_cache *block_group, u64 start, +		 u64 len), + +	TP_ARGS(root, block_group, start, len) +); + +TRACE_EVENT(btrfs_find_cluster, + +	TP_PROTO(struct btrfs_block_group_cache *block_group, u64 start, +		 u64 bytes, u64 empty_size, u64 min_bytes), + +	TP_ARGS(block_group, start, bytes, empty_size, min_bytes), + +	TP_STRUCT__entry( +		__field(	u64,	bg_objectid		) +		__field(	u64,	flags			) +		__field(	u64,	start			) +		__field(	u64,	bytes			) +		__field(	u64,	empty_size		) +		__field(	u64,	min_bytes		) +	), + +	TP_fast_assign( +		__entry->bg_objectid	= block_group->key.objectid; +		__entry->flags		= block_group->flags; +		__entry->start		= start; +		__entry->bytes		= bytes; +		__entry->empty_size	= empty_size; +		__entry->min_bytes	= min_bytes; +	), + +	TP_printk("block_group = %Lu, flags = %Lu(%s), start = %Lu, len = %Lu," +		  " empty_size = %Lu, min_bytes = %Lu", __entry->bg_objectid, +		  __entry->flags, +		  __print_flags((unsigned long)__entry->flags, "|", +				BTRFS_GROUP_FLAGS), __entry->start, +		  __entry->bytes, 
__entry->empty_size,  __entry->min_bytes) +); + +TRACE_EVENT(btrfs_failed_cluster_setup, + +	TP_PROTO(struct btrfs_block_group_cache *block_group), + +	TP_ARGS(block_group), + +	TP_STRUCT__entry( +		__field(	u64,	bg_objectid		) +	), + +	TP_fast_assign( +		__entry->bg_objectid	= block_group->key.objectid; +	), + +	TP_printk("block_group = %Lu", __entry->bg_objectid) +); + +TRACE_EVENT(btrfs_setup_cluster, + +	TP_PROTO(struct btrfs_block_group_cache *block_group, +		 struct btrfs_free_cluster *cluster, u64 size, int bitmap), + +	TP_ARGS(block_group, cluster, size, bitmap), + +	TP_STRUCT__entry( +		__field(	u64,	bg_objectid		) +		__field(	u64,	flags			) +		__field(	u64,	start			) +		__field(	u64,	max_size		) +		__field(	u64,	size			) +		__field(	int,	bitmap			) +	), + +	TP_fast_assign( +		__entry->bg_objectid	= block_group->key.objectid; +		__entry->flags		= block_group->flags; +		__entry->start		= cluster->window_start; +		__entry->max_size	= cluster->max_size; +		__entry->size		= size; +		__entry->bitmap		= bitmap; +	), + +	TP_printk("block_group = %Lu, flags = %Lu(%s), window_start = %Lu, " +		  "size = %Lu, max_size = %Lu, bitmap = %d", +		  __entry->bg_objectid, +		  __entry->flags, +		  __print_flags((unsigned long)__entry->flags, "|", +				BTRFS_GROUP_FLAGS), __entry->start, +		  __entry->size, __entry->max_size, __entry->bitmap) +); + +struct extent_state; +TRACE_EVENT(alloc_extent_state, + +	TP_PROTO(struct extent_state *state, gfp_t mask, unsigned long IP), + +	TP_ARGS(state, mask, IP), + +	TP_STRUCT__entry( +		__field(struct extent_state *, state) +		__field(gfp_t, mask) +		__field(unsigned long, ip) +	), + +	TP_fast_assign( +		__entry->state	= state, +		__entry->mask	= mask, +		__entry->ip	= IP +	), + +	TP_printk("state=%p; mask = %s; caller = %pF", __entry->state, +		  show_gfp_flags(__entry->mask), (void *)__entry->ip) +); + +TRACE_EVENT(free_extent_state, + +	TP_PROTO(struct extent_state *state, unsigned long IP), + +	TP_ARGS(state, IP), + +	
TP_STRUCT__entry( +		__field(struct extent_state *, state) +		__field(unsigned long, ip) +	), + +	TP_fast_assign( +		__entry->state	= state, +		__entry->ip = IP +	), + +	TP_printk(" state=%p; caller = %pF", __entry->state, +		  (void *)__entry->ip) +); + +DECLARE_EVENT_CLASS(btrfs__work, + +	TP_PROTO(struct btrfs_work *work), + +	TP_ARGS(work), + +	TP_STRUCT__entry( +		__field(	void *,	work			) +		__field(	void *, wq			) +		__field(	void *,	func			) +		__field(	void *,	ordered_func		) +		__field(	void *,	ordered_free		) +	), + +	TP_fast_assign( +		__entry->work		= work; +		__entry->wq		= work->wq; +		__entry->func		= work->func; +		__entry->ordered_func	= work->ordered_func; +		__entry->ordered_free	= work->ordered_free; +	), + +	TP_printk("work=%p, wq=%p, func=%p, ordered_func=%p, ordered_free=%p", +		  __entry->work, __entry->wq, __entry->func, +		  __entry->ordered_func, __entry->ordered_free) +); + +/* For situiations that the work is freed */ +DECLARE_EVENT_CLASS(btrfs__work__done, + +	TP_PROTO(struct btrfs_work *work), + +	TP_ARGS(work), + +	TP_STRUCT__entry( +		__field(	void *,	work			) +	), + +	TP_fast_assign( +		__entry->work		= work; +	), + +	TP_printk("work->%p", __entry->work) +); + +DEFINE_EVENT(btrfs__work, btrfs_work_queued, + +	TP_PROTO(struct btrfs_work *work), + +	TP_ARGS(work) +); + +DEFINE_EVENT(btrfs__work, btrfs_work_sched, + +	TP_PROTO(struct btrfs_work *work), + +	TP_ARGS(work) +); + +DEFINE_EVENT(btrfs__work, btrfs_normal_work_done, + +	TP_PROTO(struct btrfs_work *work), + +	TP_ARGS(work) +); + +DEFINE_EVENT(btrfs__work__done, btrfs_all_work_done, + +	TP_PROTO(struct btrfs_work *work), + +	TP_ARGS(work) +); + +DEFINE_EVENT(btrfs__work, btrfs_ordered_sched, + +	TP_PROTO(struct btrfs_work *work), + +	TP_ARGS(work) +); + +DECLARE_EVENT_CLASS(btrfs__workqueue, + +	TP_PROTO(struct __btrfs_workqueue *wq, const char *name, int high), + +	TP_ARGS(wq, name, high), + +	TP_STRUCT__entry( +		__field(	void *,	wq			) +		__string(	name,	name			) +		
__field(	int ,	high			) +	), + +	TP_fast_assign( +		__entry->wq		= wq; +		__assign_str(name, name); +		__entry->high		= high; +	), + +	TP_printk("name=%s%s, wq=%p", __get_str(name), +		  __print_flags(__entry->high, "", +				{(WQ_HIGHPRI),	"-high"}), +		  __entry->wq) +); + +DEFINE_EVENT(btrfs__workqueue, btrfs_workqueue_alloc, + +	TP_PROTO(struct __btrfs_workqueue *wq, const char *name, int high), + +	TP_ARGS(wq, name, high) +); + +DECLARE_EVENT_CLASS(btrfs__workqueue_done, + +	TP_PROTO(struct __btrfs_workqueue *wq), + +	TP_ARGS(wq), + +	TP_STRUCT__entry( +		__field(	void *,	wq			) +	), + +	TP_fast_assign( +		__entry->wq		= wq; +	), + +	TP_printk("wq=%p", __entry->wq) +); + +DEFINE_EVENT(btrfs__workqueue_done, btrfs_workqueue_destroy, + +	TP_PROTO(struct __btrfs_workqueue *wq), + +	TP_ARGS(wq) +); + +#endif /* _TRACE_BTRFS_H */ + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/include/trace/events/compaction.h b/include/trace/events/compaction.h new file mode 100644 index 00000000000..c6814b917bd --- /dev/null +++ b/include/trace/events/compaction.h @@ -0,0 +1,133 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM compaction + +#if !defined(_TRACE_COMPACTION_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_COMPACTION_H + +#include <linux/types.h> +#include <linux/list.h> +#include <linux/tracepoint.h> +#include <trace/events/gfpflags.h> + +DECLARE_EVENT_CLASS(mm_compaction_isolate_template, + +	TP_PROTO(unsigned long nr_scanned, +		unsigned long nr_taken), + +	TP_ARGS(nr_scanned, nr_taken), + +	TP_STRUCT__entry( +		__field(unsigned long, nr_scanned) +		__field(unsigned long, nr_taken) +	), + +	TP_fast_assign( +		__entry->nr_scanned = nr_scanned; +		__entry->nr_taken = nr_taken; +	), + +	TP_printk("nr_scanned=%lu nr_taken=%lu", +		__entry->nr_scanned, +		__entry->nr_taken) +); + +DEFINE_EVENT(mm_compaction_isolate_template, mm_compaction_isolate_migratepages, + +	TP_PROTO(unsigned long nr_scanned, +		unsigned long 
nr_taken), + +	TP_ARGS(nr_scanned, nr_taken) +); + +DEFINE_EVENT(mm_compaction_isolate_template, mm_compaction_isolate_freepages, +	TP_PROTO(unsigned long nr_scanned, +		unsigned long nr_taken), + +	TP_ARGS(nr_scanned, nr_taken) +); + +TRACE_EVENT(mm_compaction_migratepages, + +	TP_PROTO(unsigned long nr_all, +		int migrate_rc, +		struct list_head *migratepages), + +	TP_ARGS(nr_all, migrate_rc, migratepages), + +	TP_STRUCT__entry( +		__field(unsigned long, nr_migrated) +		__field(unsigned long, nr_failed) +	), + +	TP_fast_assign( +		unsigned long nr_failed = 0; +		struct list_head *page_lru; + +		/* +		 * migrate_pages() returns either a non-negative number +		 * with the number of pages that failed migration, or an +		 * error code, in which case we need to count the remaining +		 * pages manually +		 */ +		if (migrate_rc >= 0) +			nr_failed = migrate_rc; +		else +			list_for_each(page_lru, migratepages) +				nr_failed++; + +		__entry->nr_migrated = nr_all - nr_failed; +		__entry->nr_failed = nr_failed; +	), + +	TP_printk("nr_migrated=%lu nr_failed=%lu", +		__entry->nr_migrated, +		__entry->nr_failed) +); + +TRACE_EVENT(mm_compaction_begin, +	TP_PROTO(unsigned long zone_start, unsigned long migrate_start, +		unsigned long free_start, unsigned long zone_end), + +	TP_ARGS(zone_start, migrate_start, free_start, zone_end), + +	TP_STRUCT__entry( +		__field(unsigned long, zone_start) +		__field(unsigned long, migrate_start) +		__field(unsigned long, free_start) +		__field(unsigned long, zone_end) +	), + +	TP_fast_assign( +		__entry->zone_start = zone_start; +		__entry->migrate_start = migrate_start; +		__entry->free_start = free_start; +		__entry->zone_end = zone_end; +	), + +	TP_printk("zone_start=%lu migrate_start=%lu free_start=%lu zone_end=%lu", +		__entry->zone_start, +		__entry->migrate_start, +		__entry->free_start, +		__entry->zone_end) +); + +TRACE_EVENT(mm_compaction_end, +	TP_PROTO(int status), + +	TP_ARGS(status), + +	TP_STRUCT__entry( +		__field(int, 
status) +	), + +	TP_fast_assign( +		__entry->status = status; +	), + +	TP_printk("status=%d", __entry->status) +); + +#endif /* _TRACE_COMPACTION_H */ + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/include/trace/events/context_tracking.h b/include/trace/events/context_tracking.h new file mode 100644 index 00000000000..ce8007cf29c --- /dev/null +++ b/include/trace/events/context_tracking.h @@ -0,0 +1,58 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM context_tracking + +#if !defined(_TRACE_CONTEXT_TRACKING_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_CONTEXT_TRACKING_H + +#include <linux/tracepoint.h> + +DECLARE_EVENT_CLASS(context_tracking_user, + +	TP_PROTO(int dummy), + +	TP_ARGS(dummy), + +	TP_STRUCT__entry( +		__field( int,	dummy	) +	), + +	TP_fast_assign( +		__entry->dummy		= dummy; +	), + +	TP_printk("%s", "") +); + +/** + * user_enter - called when the kernel resumes to userspace + * @dummy:	dummy arg to make trace event macro happy + * + * This event occurs when the kernel resumes to userspace  after + * an exception or a syscall. + */ +DEFINE_EVENT(context_tracking_user, user_enter, + +	TP_PROTO(int dummy), + +	TP_ARGS(dummy) +); + +/** + * user_exit - called when userspace enters the kernel + * @dummy:	dummy arg to make trace event macro happy + * + * This event occurs when userspace enters the kernel through + * an exception or a syscall. 
+ */ +DEFINE_EVENT(context_tracking_user, user_exit, + +	TP_PROTO(int dummy), + +	TP_ARGS(dummy) +); + + +#endif /*  _TRACE_CONTEXT_TRACKING_H */ + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/include/trace/events/ext3.h b/include/trace/events/ext3.h new file mode 100644 index 00000000000..6797b9de90e --- /dev/null +++ b/include/trace/events/ext3.h @@ -0,0 +1,866 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM ext3 + +#if !defined(_TRACE_EXT3_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_EXT3_H + +#include <linux/tracepoint.h> + +TRACE_EVENT(ext3_free_inode, +	TP_PROTO(struct inode *inode), + +	TP_ARGS(inode), + +	TP_STRUCT__entry( +		__field(	dev_t,	dev			) +		__field(	ino_t,	ino			) +		__field(	umode_t, mode			) +		__field(	uid_t,	uid			) +		__field(	gid_t,	gid			) +		__field(	blkcnt_t, blocks		) +	), + +	TP_fast_assign( +		__entry->dev	= inode->i_sb->s_dev; +		__entry->ino	= inode->i_ino; +		__entry->mode	= inode->i_mode; +		__entry->uid	= i_uid_read(inode); +		__entry->gid	= i_gid_read(inode); +		__entry->blocks	= inode->i_blocks; +	), + +	TP_printk("dev %d,%d ino %lu mode 0%o uid %u gid %u blocks %lu", +		  MAJOR(__entry->dev), MINOR(__entry->dev), +		  (unsigned long) __entry->ino, +		  __entry->mode, __entry->uid, __entry->gid, +		  (unsigned long) __entry->blocks) +); + +TRACE_EVENT(ext3_request_inode, +	TP_PROTO(struct inode *dir, int mode), + +	TP_ARGS(dir, mode), + +	TP_STRUCT__entry( +		__field(	dev_t,	dev			) +		__field(	ino_t,	dir			) +		__field(	umode_t, mode			) +	), + +	TP_fast_assign( +		__entry->dev	= dir->i_sb->s_dev; +		__entry->dir	= dir->i_ino; +		__entry->mode	= mode; +	), + +	TP_printk("dev %d,%d dir %lu mode 0%o", +		  MAJOR(__entry->dev), MINOR(__entry->dev), +		  (unsigned long) __entry->dir, __entry->mode) +); + +TRACE_EVENT(ext3_allocate_inode, +	TP_PROTO(struct inode *inode, struct inode *dir, int mode), + +	TP_ARGS(inode, dir, mode), + +	TP_STRUCT__entry( +		__field(	dev_t,	dev			) 
+		__field(	ino_t,	ino			) +		__field(	ino_t,	dir			) +		__field(	umode_t, mode			) +	), + +	TP_fast_assign( +		__entry->dev	= inode->i_sb->s_dev; +		__entry->ino	= inode->i_ino; +		__entry->dir	= dir->i_ino; +		__entry->mode	= mode; +	), + +	TP_printk("dev %d,%d ino %lu dir %lu mode 0%o", +		  MAJOR(__entry->dev), MINOR(__entry->dev), +		  (unsigned long) __entry->ino, +		  (unsigned long) __entry->dir, __entry->mode) +); + +TRACE_EVENT(ext3_evict_inode, +	TP_PROTO(struct inode *inode), + +	TP_ARGS(inode), + +	TP_STRUCT__entry( +		__field(	dev_t,	dev			) +		__field(	ino_t,	ino			) +		__field(	int,	nlink			) +	), + +	TP_fast_assign( +		__entry->dev	= inode->i_sb->s_dev; +		__entry->ino	= inode->i_ino; +		__entry->nlink	= inode->i_nlink; +	), + +	TP_printk("dev %d,%d ino %lu nlink %d", +		  MAJOR(__entry->dev), MINOR(__entry->dev), +		  (unsigned long) __entry->ino, __entry->nlink) +); + +TRACE_EVENT(ext3_drop_inode, +	TP_PROTO(struct inode *inode, int drop), + +	TP_ARGS(inode, drop), + +	TP_STRUCT__entry( +		__field(	dev_t,	dev			) +		__field(	ino_t,	ino			) +		__field(	int,	drop			) +	), + +	TP_fast_assign( +		__entry->dev	= inode->i_sb->s_dev; +		__entry->ino	= inode->i_ino; +		__entry->drop	= drop; +	), + +	TP_printk("dev %d,%d ino %lu drop %d", +		  MAJOR(__entry->dev), MINOR(__entry->dev), +		  (unsigned long) __entry->ino, __entry->drop) +); + +TRACE_EVENT(ext3_mark_inode_dirty, +	TP_PROTO(struct inode *inode, unsigned long IP), + +	TP_ARGS(inode, IP), + +	TP_STRUCT__entry( +		__field(	dev_t,	dev			) +		__field(	ino_t,	ino			) +		__field(unsigned long,	ip			) +	), + +	TP_fast_assign( +		__entry->dev	= inode->i_sb->s_dev; +		__entry->ino	= inode->i_ino; +		__entry->ip	= IP; +	), + +	TP_printk("dev %d,%d ino %lu caller %pF", +		  MAJOR(__entry->dev), MINOR(__entry->dev), +		  (unsigned long) __entry->ino, (void *)__entry->ip) +); + +TRACE_EVENT(ext3_write_begin, +	TP_PROTO(struct inode *inode, loff_t pos, unsigned int len, +		 unsigned int flags), + +	
TP_ARGS(inode, pos, len, flags), + +	TP_STRUCT__entry( +		__field(	dev_t,	dev			) +		__field(	ino_t,	ino			) +		__field(	loff_t,	pos			) +		__field(	unsigned int, len		) +		__field(	unsigned int, flags		) +	), + +	TP_fast_assign( +		__entry->dev	= inode->i_sb->s_dev; +		__entry->ino	= inode->i_ino; +		__entry->pos	= pos; +		__entry->len	= len; +		__entry->flags	= flags; +	), + +	TP_printk("dev %d,%d ino %lu pos %llu len %u flags %u", +		  MAJOR(__entry->dev), MINOR(__entry->dev), +		  (unsigned long) __entry->ino, +		  (unsigned long long) __entry->pos, __entry->len, +		  __entry->flags) +); + +DECLARE_EVENT_CLASS(ext3__write_end, +	TP_PROTO(struct inode *inode, loff_t pos, unsigned int len, +			unsigned int copied), + +	TP_ARGS(inode, pos, len, copied), + +	TP_STRUCT__entry( +		__field(	dev_t,	dev			) +		__field(	ino_t,	ino			) +		__field(	loff_t,	pos			) +		__field(	unsigned int, len		) +		__field(	unsigned int, copied		) +	), + +	TP_fast_assign( +		__entry->dev	= inode->i_sb->s_dev; +		__entry->ino	= inode->i_ino; +		__entry->pos	= pos; +		__entry->len	= len; +		__entry->copied	= copied; +	), + +	TP_printk("dev %d,%d ino %lu pos %llu len %u copied %u", +		  MAJOR(__entry->dev), MINOR(__entry->dev), +		  (unsigned long) __entry->ino, +		  (unsigned long long) __entry->pos, __entry->len, +		  __entry->copied) +); + +DEFINE_EVENT(ext3__write_end, ext3_ordered_write_end, + +	TP_PROTO(struct inode *inode, loff_t pos, unsigned int len, +		 unsigned int copied), + +	TP_ARGS(inode, pos, len, copied) +); + +DEFINE_EVENT(ext3__write_end, ext3_writeback_write_end, + +	TP_PROTO(struct inode *inode, loff_t pos, unsigned int len, +		 unsigned int copied), + +	TP_ARGS(inode, pos, len, copied) +); + +DEFINE_EVENT(ext3__write_end, ext3_journalled_write_end, + +	TP_PROTO(struct inode *inode, loff_t pos, unsigned int len, +		 unsigned int copied), + +	TP_ARGS(inode, pos, len, copied) +); + +DECLARE_EVENT_CLASS(ext3__page_op, +	TP_PROTO(struct page *page), + +	TP_ARGS(page), + +	
TP_STRUCT__entry( +		__field(	dev_t,	dev			) +		__field(	ino_t,	ino			) +		__field(	pgoff_t, index			) + +	), + +	TP_fast_assign( +		__entry->index	= page->index; +		__entry->ino	= page->mapping->host->i_ino; +		__entry->dev	= page->mapping->host->i_sb->s_dev; +	), + +	TP_printk("dev %d,%d ino %lu page_index %lu", +		  MAJOR(__entry->dev), MINOR(__entry->dev), +		  (unsigned long) __entry->ino, __entry->index) +); + +DEFINE_EVENT(ext3__page_op, ext3_ordered_writepage, + +	TP_PROTO(struct page *page), + +	TP_ARGS(page) +); + +DEFINE_EVENT(ext3__page_op, ext3_writeback_writepage, + +	TP_PROTO(struct page *page), + +	TP_ARGS(page) +); + +DEFINE_EVENT(ext3__page_op, ext3_journalled_writepage, + +	TP_PROTO(struct page *page), + +	TP_ARGS(page) +); + +DEFINE_EVENT(ext3__page_op, ext3_readpage, + +	TP_PROTO(struct page *page), + +	TP_ARGS(page) +); + +DEFINE_EVENT(ext3__page_op, ext3_releasepage, + +	TP_PROTO(struct page *page), + +	TP_ARGS(page) +); + +TRACE_EVENT(ext3_invalidatepage, +	TP_PROTO(struct page *page, unsigned int offset, unsigned int length), + +	TP_ARGS(page, offset, length), + +	TP_STRUCT__entry( +		__field(	pgoff_t, index			) +		__field(	unsigned int, offset		) +		__field(	unsigned int, length		) +		__field(	ino_t,	ino			) +		__field(	dev_t,	dev			) + +	), + +	TP_fast_assign( +		__entry->index	= page->index; +		__entry->offset	= offset; +		__entry->length	= length; +		__entry->ino	= page->mapping->host->i_ino; +		__entry->dev	= page->mapping->host->i_sb->s_dev; +	), + +	TP_printk("dev %d,%d ino %lu page_index %lu offset %u length %u", +		  MAJOR(__entry->dev), MINOR(__entry->dev), +		  (unsigned long) __entry->ino, +		  __entry->index, __entry->offset, __entry->length) +); + +TRACE_EVENT(ext3_discard_blocks, +	TP_PROTO(struct super_block *sb, unsigned long blk, +			unsigned long count), + +	TP_ARGS(sb, blk, count), + +	TP_STRUCT__entry( +		__field(	dev_t,		dev		) +		__field(	unsigned long,	blk		) +		__field(	unsigned long,	count		) + +	), + +	
TP_fast_assign( +		__entry->dev	= sb->s_dev; +		__entry->blk	= blk; +		__entry->count	= count; +	), + +	TP_printk("dev %d,%d blk %lu count %lu", +		  MAJOR(__entry->dev), MINOR(__entry->dev), +		  __entry->blk, __entry->count) +); + +TRACE_EVENT(ext3_request_blocks, +	TP_PROTO(struct inode *inode, unsigned long goal, +		 unsigned long count), + +	TP_ARGS(inode, goal, count), + +	TP_STRUCT__entry( +		__field(	dev_t,	dev			) +		__field(	ino_t,	ino			) +		__field(	unsigned long, count		) +		__field(	unsigned long,	goal		) +	), + +	TP_fast_assign( +		__entry->dev	= inode->i_sb->s_dev; +		__entry->ino	= inode->i_ino; +		__entry->count	= count; +		__entry->goal	= goal; +	), + +	TP_printk("dev %d,%d ino %lu count %lu goal %lu ", +		  MAJOR(__entry->dev), MINOR(__entry->dev), +		  (unsigned long) __entry->ino, +		  __entry->count, __entry->goal) +); + +TRACE_EVENT(ext3_allocate_blocks, +	TP_PROTO(struct inode *inode, unsigned long goal, +		 unsigned long count, unsigned long block), + +	TP_ARGS(inode, goal, count, block), + +	TP_STRUCT__entry( +		__field(	dev_t,	dev			) +		__field(	ino_t,	ino			) +		__field(	unsigned long,	block		) +		__field(	unsigned long, count		) +		__field(	unsigned long,	goal		) +	), + +	TP_fast_assign( +		__entry->dev	= inode->i_sb->s_dev; +		__entry->ino	= inode->i_ino; +		__entry->block	= block; +		__entry->count	= count; +		__entry->goal	= goal; +	), + +	TP_printk("dev %d,%d ino %lu count %lu block %lu goal %lu", +		  MAJOR(__entry->dev), MINOR(__entry->dev), +		  (unsigned long) __entry->ino, +		   __entry->count, __entry->block, +		  __entry->goal) +); + +TRACE_EVENT(ext3_free_blocks, +	TP_PROTO(struct inode *inode, unsigned long block, +		 unsigned long count), + +	TP_ARGS(inode, block, count), + +	TP_STRUCT__entry( +		__field(	dev_t,	dev			) +		__field(	ino_t,	ino			) +		__field(	umode_t, mode			) +		__field(	unsigned long,	block		) +		__field(	unsigned long,	count		) +	), + +	TP_fast_assign( +		__entry->dev		= inode->i_sb->s_dev; +		
__entry->ino		= inode->i_ino; +		__entry->mode		= inode->i_mode; +		__entry->block		= block; +		__entry->count		= count; +	), + +	TP_printk("dev %d,%d ino %lu mode 0%o block %lu count %lu", +		  MAJOR(__entry->dev), MINOR(__entry->dev), +		  (unsigned long) __entry->ino, +		  __entry->mode, __entry->block, __entry->count) +); + +TRACE_EVENT(ext3_sync_file_enter, +	TP_PROTO(struct file *file, int datasync), + +	TP_ARGS(file, datasync), + +	TP_STRUCT__entry( +		__field(	dev_t,	dev			) +		__field(	ino_t,	ino			) +		__field(	ino_t,	parent			) +		__field(	int,	datasync		) +	), + +	TP_fast_assign( +		struct dentry *dentry = file->f_path.dentry; + +		__entry->dev		= dentry->d_inode->i_sb->s_dev; +		__entry->ino		= dentry->d_inode->i_ino; +		__entry->datasync	= datasync; +		__entry->parent		= dentry->d_parent->d_inode->i_ino; +	), + +	TP_printk("dev %d,%d ino %lu parent %ld datasync %d ", +		  MAJOR(__entry->dev), MINOR(__entry->dev), +		  (unsigned long) __entry->ino, +		  (unsigned long) __entry->parent, __entry->datasync) +); + +TRACE_EVENT(ext3_sync_file_exit, +	TP_PROTO(struct inode *inode, int ret), + +	TP_ARGS(inode, ret), + +	TP_STRUCT__entry( +		__field(	int,	ret			) +		__field(	ino_t,	ino			) +		__field(	dev_t,	dev			) +	), + +	TP_fast_assign( +		__entry->ret		= ret; +		__entry->ino		= inode->i_ino; +		__entry->dev		= inode->i_sb->s_dev; +	), + +	TP_printk("dev %d,%d ino %lu ret %d", +		  MAJOR(__entry->dev), MINOR(__entry->dev), +		  (unsigned long) __entry->ino, +		  __entry->ret) +); + +TRACE_EVENT(ext3_sync_fs, +	TP_PROTO(struct super_block *sb, int wait), + +	TP_ARGS(sb, wait), + +	TP_STRUCT__entry( +		__field(	dev_t,	dev			) +		__field(	int,	wait			) + +	), + +	TP_fast_assign( +		__entry->dev	= sb->s_dev; +		__entry->wait	= wait; +	), + +	TP_printk("dev %d,%d wait %d", +		  MAJOR(__entry->dev), MINOR(__entry->dev), +		  __entry->wait) +); + +TRACE_EVENT(ext3_rsv_window_add, +	TP_PROTO(struct super_block *sb, +		 struct ext3_reserve_window_node *rsv_node), + 
+	TP_ARGS(sb, rsv_node), + +	TP_STRUCT__entry( +		__field(	unsigned long,	start		) +		__field(	unsigned long,	end		) +		__field(	dev_t,	dev			) +	), + +	TP_fast_assign( +		__entry->dev	= sb->s_dev; +		__entry->start	= rsv_node->rsv_window._rsv_start; +		__entry->end	= rsv_node->rsv_window._rsv_end; +	), + +	TP_printk("dev %d,%d start %lu end %lu", +		  MAJOR(__entry->dev), MINOR(__entry->dev), +		  __entry->start, __entry->end) +); + +TRACE_EVENT(ext3_discard_reservation, +	TP_PROTO(struct inode *inode, +		 struct ext3_reserve_window_node *rsv_node), + +	TP_ARGS(inode, rsv_node), + +	TP_STRUCT__entry( +		__field(	unsigned long,	start		) +		__field(	unsigned long,	end		) +		__field(	ino_t,	ino			) +		__field(	dev_t,	dev			) +	), + +	TP_fast_assign( +		__entry->start	= rsv_node->rsv_window._rsv_start; +		__entry->end	= rsv_node->rsv_window._rsv_end; +		__entry->ino	= inode->i_ino; +		__entry->dev	= inode->i_sb->s_dev; +	), + +	TP_printk("dev %d,%d ino %lu start %lu end %lu", +		  MAJOR(__entry->dev), MINOR(__entry->dev), +		  (unsigned long)__entry->ino, __entry->start, +		  __entry->end) +); + +TRACE_EVENT(ext3_alloc_new_reservation, +	TP_PROTO(struct super_block *sb, unsigned long goal), + +	TP_ARGS(sb, goal), + +	TP_STRUCT__entry( +		__field(	dev_t,	dev			) +		__field(	unsigned long,	goal		) +	), + +	TP_fast_assign( +		__entry->dev	= sb->s_dev; +		__entry->goal	= goal; +	), + +	TP_printk("dev %d,%d goal %lu", +		  MAJOR(__entry->dev), MINOR(__entry->dev), +		  __entry->goal) +); + +TRACE_EVENT(ext3_reserved, +	TP_PROTO(struct super_block *sb, unsigned long block, +		 struct ext3_reserve_window_node *rsv_node), + +	TP_ARGS(sb, block, rsv_node), + +	TP_STRUCT__entry( +		__field(	unsigned long,	block		) +		__field(	unsigned long,	start		) +		__field(	unsigned long,	end		) +		__field(	dev_t,	dev			) +	), + +	TP_fast_assign( +		__entry->block	= block; +		__entry->start	= rsv_node->rsv_window._rsv_start; +		__entry->end	= rsv_node->rsv_window._rsv_end; +		__entry->dev	= 
sb->s_dev; +	), + +	TP_printk("dev %d,%d block %lu, start %lu end %lu", +		  MAJOR(__entry->dev), MINOR(__entry->dev), +		  __entry->block, __entry->start, __entry->end) +); + +TRACE_EVENT(ext3_forget, +	TP_PROTO(struct inode *inode, int is_metadata, unsigned long block), + +	TP_ARGS(inode, is_metadata, block), + +	TP_STRUCT__entry( +		__field(	dev_t,	dev			) +		__field(	ino_t,	ino			) +		__field(	umode_t, mode			) +		__field(	int,	is_metadata		) +		__field(	unsigned long,	block		) +	), + +	TP_fast_assign( +		__entry->dev	= inode->i_sb->s_dev; +		__entry->ino	= inode->i_ino; +		__entry->mode	= inode->i_mode; +		__entry->is_metadata = is_metadata; +		__entry->block	= block; +	), + +	TP_printk("dev %d,%d ino %lu mode 0%o is_metadata %d block %lu", +		  MAJOR(__entry->dev), MINOR(__entry->dev), +		  (unsigned long) __entry->ino, +		  __entry->mode, __entry->is_metadata, __entry->block) +); + +TRACE_EVENT(ext3_read_block_bitmap, +	TP_PROTO(struct super_block *sb, unsigned int group), + +	TP_ARGS(sb, group), + +	TP_STRUCT__entry( +		__field(	dev_t,	dev			) +		__field(	__u32,	group			) + +	), + +	TP_fast_assign( +		__entry->dev	= sb->s_dev; +		__entry->group	= group; +	), + +	TP_printk("dev %d,%d group %u", +		  MAJOR(__entry->dev), MINOR(__entry->dev), +		  __entry->group) +); + +TRACE_EVENT(ext3_direct_IO_enter, +	TP_PROTO(struct inode *inode, loff_t offset, unsigned long len, int rw), + +	TP_ARGS(inode, offset, len, rw), + +	TP_STRUCT__entry( +		__field(	ino_t,	ino			) +		__field(	dev_t,	dev			) +		__field(	loff_t,	pos			) +		__field(	unsigned long,	len		) +		__field(	int,	rw			) +	), + +	TP_fast_assign( +		__entry->ino	= inode->i_ino; +		__entry->dev	= inode->i_sb->s_dev; +		__entry->pos	= offset; +		__entry->len	= len; +		__entry->rw	= rw; +	), + +	TP_printk("dev %d,%d ino %lu pos %llu len %lu rw %d", +		  MAJOR(__entry->dev), MINOR(__entry->dev), +		  (unsigned long) __entry->ino, +		  (unsigned long long) __entry->pos, __entry->len, +		  __entry->rw) +); + 
+TRACE_EVENT(ext3_direct_IO_exit, +	TP_PROTO(struct inode *inode, loff_t offset, unsigned long len, +		 int rw, int ret), + +	TP_ARGS(inode, offset, len, rw, ret), + +	TP_STRUCT__entry( +		__field(	ino_t,	ino			) +		__field(	dev_t,	dev			) +		__field(	loff_t,	pos			) +		__field(	unsigned long,	len		) +		__field(	int,	rw			) +		__field(	int,	ret			) +	), + +	TP_fast_assign( +		__entry->ino	= inode->i_ino; +		__entry->dev	= inode->i_sb->s_dev; +		__entry->pos	= offset; +		__entry->len	= len; +		__entry->rw	= rw; +		__entry->ret	= ret; +	), + +	TP_printk("dev %d,%d ino %lu pos %llu len %lu rw %d ret %d", +		  MAJOR(__entry->dev), MINOR(__entry->dev), +		  (unsigned long) __entry->ino, +		  (unsigned long long) __entry->pos, __entry->len, +		  __entry->rw, __entry->ret) +); + +TRACE_EVENT(ext3_unlink_enter, +	TP_PROTO(struct inode *parent, struct dentry *dentry), + +	TP_ARGS(parent, dentry), + +	TP_STRUCT__entry( +		__field(	ino_t,	parent			) +		__field(	ino_t,	ino			) +		__field(	loff_t,	size			) +		__field(	dev_t,	dev			) +	), + +	TP_fast_assign( +		__entry->parent		= parent->i_ino; +		__entry->ino		= dentry->d_inode->i_ino; +		__entry->size		= dentry->d_inode->i_size; +		__entry->dev		= dentry->d_inode->i_sb->s_dev; +	), + +	TP_printk("dev %d,%d ino %lu size %lld parent %ld", +		  MAJOR(__entry->dev), MINOR(__entry->dev), +		  (unsigned long) __entry->ino, +		  (unsigned long long)__entry->size, +		  (unsigned long) __entry->parent) +); + +TRACE_EVENT(ext3_unlink_exit, +	TP_PROTO(struct dentry *dentry, int ret), + +	TP_ARGS(dentry, ret), + +	TP_STRUCT__entry( +		__field(	ino_t,	ino			) +		__field(	dev_t,	dev			) +		__field(	int,	ret			) +	), + +	TP_fast_assign( +		__entry->ino		= dentry->d_inode->i_ino; +		__entry->dev		= dentry->d_inode->i_sb->s_dev; +		__entry->ret		= ret; +	), + +	TP_printk("dev %d,%d ino %lu ret %d", +		  MAJOR(__entry->dev), MINOR(__entry->dev), +		  (unsigned long) __entry->ino, +		  __entry->ret) +); + +DECLARE_EVENT_CLASS(ext3__truncate, +	
TP_PROTO(struct inode *inode), + +	TP_ARGS(inode), + +	TP_STRUCT__entry( +		__field(	ino_t,		ino		) +		__field(	dev_t,		dev		) +		__field(	blkcnt_t,	blocks		) +	), + +	TP_fast_assign( +		__entry->ino    = inode->i_ino; +		__entry->dev    = inode->i_sb->s_dev; +		__entry->blocks	= inode->i_blocks; +	), + +	TP_printk("dev %d,%d ino %lu blocks %lu", +		  MAJOR(__entry->dev), MINOR(__entry->dev), +		  (unsigned long) __entry->ino, (unsigned long) __entry->blocks) +); + +DEFINE_EVENT(ext3__truncate, ext3_truncate_enter, + +	TP_PROTO(struct inode *inode), + +	TP_ARGS(inode) +); + +DEFINE_EVENT(ext3__truncate, ext3_truncate_exit, + +	TP_PROTO(struct inode *inode), + +	TP_ARGS(inode) +); + +TRACE_EVENT(ext3_get_blocks_enter, +	TP_PROTO(struct inode *inode, unsigned long lblk, +		 unsigned long len, int create), + +	TP_ARGS(inode, lblk, len, create), + +	TP_STRUCT__entry( +		__field(	ino_t,		ino		) +		__field(	dev_t,		dev		) +		__field(	unsigned long,	lblk		) +		__field(	unsigned long,	len		) +		__field(	int,		create		) +	), + +	TP_fast_assign( +		__entry->ino    = inode->i_ino; +		__entry->dev    = inode->i_sb->s_dev; +		__entry->lblk	= lblk; +		__entry->len	= len; +		__entry->create	= create; +	), + +	TP_printk("dev %d,%d ino %lu lblk %lu len %lu create %u", +		  MAJOR(__entry->dev), MINOR(__entry->dev), +		  (unsigned long) __entry->ino, +		  __entry->lblk, __entry->len, __entry->create) +); + +TRACE_EVENT(ext3_get_blocks_exit, +	TP_PROTO(struct inode *inode, unsigned long lblk, +		 unsigned long pblk, unsigned long len, int ret), + +	TP_ARGS(inode, lblk, pblk, len, ret), + +	TP_STRUCT__entry( +		__field(	ino_t,		ino		) +		__field(	dev_t,		dev		) +		__field(	unsigned long,	lblk		) +		__field(	unsigned long,	pblk		) +		__field(	unsigned long,	len		) +		__field(	int,		ret		) +	), + +	TP_fast_assign( +		__entry->ino    = inode->i_ino; +		__entry->dev    = inode->i_sb->s_dev; +		__entry->lblk	= lblk; +		__entry->pblk	= pblk; +		__entry->len	= len; +		__entry->ret	= ret; +	), 
+ +	TP_printk("dev %d,%d ino %lu lblk %lu pblk %lu len %lu ret %d", +		  MAJOR(__entry->dev), MINOR(__entry->dev), +		  (unsigned long) __entry->ino, +		   __entry->lblk, __entry->pblk, +		  __entry->len, __entry->ret) +); + +TRACE_EVENT(ext3_load_inode, +	TP_PROTO(struct inode *inode), + +	TP_ARGS(inode), + +	TP_STRUCT__entry( +		__field(	ino_t,	ino		) +		__field(	dev_t,	dev		) +	), + +	TP_fast_assign( +		__entry->ino		= inode->i_ino; +		__entry->dev		= inode->i_sb->s_dev; +	), + +	TP_printk("dev %d,%d ino %lu", +		  MAJOR(__entry->dev), MINOR(__entry->dev), +		  (unsigned long) __entry->ino) +); + +#endif /* _TRACE_EXT3_H */ + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/include/trace/events/ext4.h b/include/trace/events/ext4.h index e5e345fb2a5..d4f70a7fe87 100644 --- a/include/trace/events/ext4.h +++ b/include/trace/events/ext4.h @@ -9,42 +9,99 @@  struct ext4_allocation_context;  struct ext4_allocation_request; +struct ext4_extent;  struct ext4_prealloc_space;  struct ext4_inode_info;  struct mpage_da_data; +struct ext4_map_blocks; +struct extent_status;  #define EXT4_I(inode) (container_of(inode, struct ext4_inode_info, vfs_inode)) +#define show_mballoc_flags(flags) __print_flags(flags, "|",	\ +	{ EXT4_MB_HINT_MERGE,		"HINT_MERGE" },		\ +	{ EXT4_MB_HINT_RESERVED,	"HINT_RESV" },		\ +	{ EXT4_MB_HINT_METADATA,	"HINT_MDATA" },		\ +	{ EXT4_MB_HINT_FIRST,		"HINT_FIRST" },		\ +	{ EXT4_MB_HINT_BEST,		"HINT_BEST" },		\ +	{ EXT4_MB_HINT_DATA,		"HINT_DATA" },		\ +	{ EXT4_MB_HINT_NOPREALLOC,	"HINT_NOPREALLOC" },	\ +	{ EXT4_MB_HINT_GROUP_ALLOC,	"HINT_GRP_ALLOC" },	\ +	{ EXT4_MB_HINT_GOAL_ONLY,	"HINT_GOAL_ONLY" },	\ +	{ EXT4_MB_HINT_TRY_GOAL,	"HINT_TRY_GOAL" },	\ +	{ EXT4_MB_DELALLOC_RESERVED,	"DELALLOC_RESV" },	\ +	{ EXT4_MB_STREAM_ALLOC,		"STREAM_ALLOC" },	\ +	{ EXT4_MB_USE_ROOT_BLOCKS,	"USE_ROOT_BLKS" },	\ +	{ EXT4_MB_USE_RESERVED,		"USE_RESV" }) + +#define show_map_flags(flags) __print_flags(flags, "|",			\ +	{ 
EXT4_GET_BLOCKS_CREATE,		"CREATE" },		\ +	{ EXT4_GET_BLOCKS_UNWRIT_EXT,		"UNWRIT" },		\ +	{ EXT4_GET_BLOCKS_DELALLOC_RESERVE,	"DELALLOC" },		\ +	{ EXT4_GET_BLOCKS_PRE_IO,		"PRE_IO" },		\ +	{ EXT4_GET_BLOCKS_CONVERT,		"CONVERT" },		\ +	{ EXT4_GET_BLOCKS_METADATA_NOFAIL,	"METADATA_NOFAIL" },	\ +	{ EXT4_GET_BLOCKS_NO_NORMALIZE,		"NO_NORMALIZE" },	\ +	{ EXT4_GET_BLOCKS_KEEP_SIZE,		"KEEP_SIZE" },		\ +	{ EXT4_GET_BLOCKS_NO_LOCK,		"NO_LOCK" },		\ +	{ EXT4_GET_BLOCKS_NO_PUT_HOLE,		"NO_PUT_HOLE" }) + +#define show_mflags(flags) __print_flags(flags, "",	\ +	{ EXT4_MAP_NEW,		"N" },			\ +	{ EXT4_MAP_MAPPED,	"M" },			\ +	{ EXT4_MAP_UNWRITTEN,	"U" },			\ +	{ EXT4_MAP_BOUNDARY,	"B" },			\ +	{ EXT4_MAP_FROM_CLUSTER, "C" }) + +#define show_free_flags(flags) __print_flags(flags, "|",	\ +	{ EXT4_FREE_BLOCKS_METADATA,		"METADATA" },	\ +	{ EXT4_FREE_BLOCKS_FORGET,		"FORGET" },	\ +	{ EXT4_FREE_BLOCKS_VALIDATED,		"VALIDATED" },	\ +	{ EXT4_FREE_BLOCKS_NO_QUOT_UPDATE,	"NO_QUOTA" },	\ +	{ EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER,"1ST_CLUSTER" },\ +	{ EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER,	"LAST_CLUSTER" }) + +#define show_extent_status(status) __print_flags(status, "",	\ +	{ EXTENT_STATUS_WRITTEN,	"W" },			\ +	{ EXTENT_STATUS_UNWRITTEN,	"U" },			\ +	{ EXTENT_STATUS_DELAYED,	"D" },			\ +	{ EXTENT_STATUS_HOLE,		"H" }) + +#define show_falloc_mode(mode) __print_flags(mode, "|",		\ +	{ FALLOC_FL_KEEP_SIZE,		"KEEP_SIZE"},		\ +	{ FALLOC_FL_PUNCH_HOLE,		"PUNCH_HOLE"},		\ +	{ FALLOC_FL_NO_HIDE_STALE,	"NO_HIDE_STALE"},	\ +	{ FALLOC_FL_COLLAPSE_RANGE,	"COLLAPSE_RANGE"},	\ +	{ FALLOC_FL_ZERO_RANGE,		"ZERO_RANGE"}) + +  TRACE_EVENT(ext4_free_inode,  	TP_PROTO(struct inode *inode),  	TP_ARGS(inode),  	TP_STRUCT__entry( -		__field(	int,   dev_major                ) -		__field(	int,   dev_minor                ) +		__field(	dev_t,	dev			)  		__field(	ino_t,	ino			) -		__field(	umode_t, mode			)  		__field(	uid_t,	uid			)  		__field(	gid_t,	gid			) -		__field(	blkcnt_t, blocks		) +		__field(	__u64, blocks			) +		
__field(	__u16, mode			)  	),  	TP_fast_assign( -		__entry->dev_major = MAJOR(inode->i_sb->s_dev); -		__entry->dev_minor = MINOR(inode->i_sb->s_dev); +		__entry->dev	= inode->i_sb->s_dev;  		__entry->ino	= inode->i_ino; -		__entry->mode	= inode->i_mode; -		__entry->uid	= inode->i_uid; -		__entry->gid	= inode->i_gid; +		__entry->uid	= i_uid_read(inode); +		__entry->gid	= i_gid_read(inode);  		__entry->blocks	= inode->i_blocks; +		__entry->mode	= inode->i_mode;  	),  	TP_printk("dev %d,%d ino %lu mode 0%o uid %u gid %u blocks %llu", -		  __entry->dev_major, __entry->dev_minor, +		  MAJOR(__entry->dev), MINOR(__entry->dev),  		  (unsigned long) __entry->ino, __entry->mode, -		  __entry->uid, __entry->gid, -		  (unsigned long long) __entry->blocks) +		  __entry->uid, __entry->gid, __entry->blocks)  );  TRACE_EVENT(ext4_request_inode, @@ -53,21 +110,19 @@ TRACE_EVENT(ext4_request_inode,  	TP_ARGS(dir, mode),  	TP_STRUCT__entry( -		__field(	int,   dev_major                ) -		__field(	int,   dev_minor                ) +		__field(	dev_t,	dev			)  		__field(	ino_t,	dir			) -		__field(	umode_t, mode			) +		__field(	__u16, mode			)  	),  	TP_fast_assign( -		__entry->dev_major = MAJOR(dir->i_sb->s_dev); -		__entry->dev_minor = MINOR(dir->i_sb->s_dev); +		__entry->dev	= dir->i_sb->s_dev;  		__entry->dir	= dir->i_ino;  		__entry->mode	= mode;  	),  	TP_printk("dev %d,%d dir %lu mode 0%o", -		  __entry->dev_major, __entry->dev_minor, +		  MAJOR(__entry->dev), MINOR(__entry->dev),  		  (unsigned long) __entry->dir, __entry->mode)  ); @@ -77,23 +132,21 @@ TRACE_EVENT(ext4_allocate_inode,  	TP_ARGS(inode, dir, mode),  	TP_STRUCT__entry( -		__field(	int,   dev_major                ) -		__field(	int,   dev_minor                ) +		__field(	dev_t,	dev			)  		__field(	ino_t,	ino			)  		__field(	ino_t,	dir			) -		__field(	umode_t, mode			) +		__field(	__u16,	mode			)  	),  	TP_fast_assign( -		__entry->dev_major = MAJOR(inode->i_sb->s_dev); -		__entry->dev_minor = 
MINOR(inode->i_sb->s_dev); +		__entry->dev	= inode->i_sb->s_dev;  		__entry->ino	= inode->i_ino;  		__entry->dir	= dir->i_ino;  		__entry->mode	= mode;  	),  	TP_printk("dev %d,%d ino %lu dir %lu mode 0%o", -		  __entry->dev_major, __entry->dev_minor, +		  MAJOR(__entry->dev), MINOR(__entry->dev),  		  (unsigned long) __entry->ino,  		  (unsigned long) __entry->dir, __entry->mode)  ); @@ -104,21 +157,19 @@ TRACE_EVENT(ext4_evict_inode,  	TP_ARGS(inode),  	TP_STRUCT__entry( -		__field(	int,   dev_major                ) -		__field(	int,   dev_minor                ) +		__field(	dev_t,	dev			)  		__field(	ino_t,	ino			)  		__field(	int,	nlink			)  	),  	TP_fast_assign( -		__entry->dev_major = MAJOR(inode->i_sb->s_dev); -		__entry->dev_minor = MINOR(inode->i_sb->s_dev); +		__entry->dev	= inode->i_sb->s_dev;  		__entry->ino	= inode->i_ino;  		__entry->nlink	= inode->i_nlink;  	),  	TP_printk("dev %d,%d ino %lu nlink %d", -		  __entry->dev_major, __entry->dev_minor, +		  MAJOR(__entry->dev), MINOR(__entry->dev),  		  (unsigned long) __entry->ino, __entry->nlink)  ); @@ -128,21 +179,19 @@ TRACE_EVENT(ext4_drop_inode,  	TP_ARGS(inode, drop),  	TP_STRUCT__entry( -		__field(	int,	dev_major		) -		__field(	int,	dev_minor		) +		__field(	dev_t,	dev			)  		__field(	ino_t,	ino			)  		__field(	int,	drop			)  	),  	TP_fast_assign( -		__entry->dev_major = MAJOR(inode->i_sb->s_dev); -		__entry->dev_minor = MINOR(inode->i_sb->s_dev); +		__entry->dev	= inode->i_sb->s_dev;  		__entry->ino	= inode->i_ino;  		__entry->drop	= drop;  	),  	TP_printk("dev %d,%d ino %lu drop %d", -		  __entry->dev_major, __entry->dev_minor, +		  MAJOR(__entry->dev), MINOR(__entry->dev),  		  (unsigned long) __entry->ino, __entry->drop)  ); @@ -152,21 +201,19 @@ TRACE_EVENT(ext4_mark_inode_dirty,  	TP_ARGS(inode, IP),  	TP_STRUCT__entry( -		__field(	int,	dev_major		) -		__field(	int,	dev_minor		) +		__field(	dev_t,	dev			)  		__field(	ino_t,	ino			)  		__field(unsigned long,	ip			)  	),  	TP_fast_assign( -		
__entry->dev_major = MAJOR(inode->i_sb->s_dev); -		__entry->dev_minor = MINOR(inode->i_sb->s_dev); +		__entry->dev	= inode->i_sb->s_dev;  		__entry->ino	= inode->i_ino;  		__entry->ip	= IP;  	),  	TP_printk("dev %d,%d ino %lu caller %pF", -		  __entry->dev_major, __entry->dev_minor, +		  MAJOR(__entry->dev), MINOR(__entry->dev),  		  (unsigned long) __entry->ino, (void *)__entry->ip)  ); @@ -176,23 +223,21 @@ TRACE_EVENT(ext4_begin_ordered_truncate,  	TP_ARGS(inode, new_size),  	TP_STRUCT__entry( -		__field(	int,	dev_major		) -		__field(	int,	dev_minor		) +		__field(	dev_t,	dev			)  		__field(	ino_t,	ino			)  		__field(	loff_t,	new_size		)  	),  	TP_fast_assign( -		__entry->dev_major	= MAJOR(inode->i_sb->s_dev); -		__entry->dev_minor	= MINOR(inode->i_sb->s_dev); +		__entry->dev		= inode->i_sb->s_dev;  		__entry->ino		= inode->i_ino;  		__entry->new_size	= new_size;  	),  	TP_printk("dev %d,%d ino %lu new_size %lld", -		  __entry->dev_major, __entry->dev_minor, +		  MAJOR(__entry->dev), MINOR(__entry->dev),  		  (unsigned long) __entry->ino, -		  (long long) __entry->new_size) +		  __entry->new_size)  );  DECLARE_EVENT_CLASS(ext4__write_begin, @@ -203,8 +248,7 @@ DECLARE_EVENT_CLASS(ext4__write_begin,  	TP_ARGS(inode, pos, len, flags),  	TP_STRUCT__entry( -		__field(	int,   dev_major                ) -		__field(	int,   dev_minor                ) +		__field(	dev_t,	dev			)  		__field(	ino_t,	ino			)  		__field(	loff_t,	pos			)  		__field(	unsigned int, len		) @@ -212,16 +256,15 @@ DECLARE_EVENT_CLASS(ext4__write_begin,  	),  	TP_fast_assign( -		__entry->dev_major = MAJOR(inode->i_sb->s_dev); -		__entry->dev_minor = MINOR(inode->i_sb->s_dev); +		__entry->dev	= inode->i_sb->s_dev;  		__entry->ino	= inode->i_ino;  		__entry->pos	= pos;  		__entry->len	= len;  		__entry->flags	= flags;  	), -	TP_printk("dev %d,%d ino %lu pos %llu len %u flags %u", -		  __entry->dev_major, __entry->dev_minor, +	TP_printk("dev %d,%d ino %lu pos %lld len %u flags %u", +		  
MAJOR(__entry->dev), MINOR(__entry->dev),  		  (unsigned long) __entry->ino,  		  __entry->pos, __entry->len, __entry->flags)  ); @@ -249,8 +292,7 @@ DECLARE_EVENT_CLASS(ext4__write_end,  	TP_ARGS(inode, pos, len, copied),  	TP_STRUCT__entry( -		__field(	int,   dev_major                ) -		__field(	int,   dev_minor                ) +		__field(	dev_t,	dev			)  		__field(	ino_t,	ino			)  		__field(	loff_t,	pos			)  		__field(	unsigned int, len		) @@ -258,29 +300,20 @@ DECLARE_EVENT_CLASS(ext4__write_end,  	),  	TP_fast_assign( -		__entry->dev_major = MAJOR(inode->i_sb->s_dev); -		__entry->dev_minor = MINOR(inode->i_sb->s_dev); +		__entry->dev	= inode->i_sb->s_dev;  		__entry->ino	= inode->i_ino;  		__entry->pos	= pos;  		__entry->len	= len;  		__entry->copied	= copied;  	), -	TP_printk("dev %d,%d ino %lu pos %llu len %u copied %u", -		  __entry->dev_major, __entry->dev_minor, -		  (unsigned long) __entry->ino, __entry->pos, -		  __entry->len, __entry->copied) -); - -DEFINE_EVENT(ext4__write_end, ext4_ordered_write_end, - -	TP_PROTO(struct inode *inode, loff_t pos, unsigned int len, -		 unsigned int copied), - -	TP_ARGS(inode, pos, len, copied) +	TP_printk("dev %d,%d ino %lu pos %lld len %u copied %u", +		  MAJOR(__entry->dev), MINOR(__entry->dev), +		  (unsigned long) __entry->ino, +		  __entry->pos, __entry->len, __entry->copied)  ); -DEFINE_EVENT(ext4__write_end, ext4_writeback_write_end, +DEFINE_EVENT(ext4__write_end, ext4_write_end,  	TP_PROTO(struct inode *inode, loff_t pos, unsigned int len,  		 unsigned int copied), @@ -304,150 +337,224 @@ DEFINE_EVENT(ext4__write_end, ext4_da_write_end,  	TP_ARGS(inode, pos, len, copied)  ); -TRACE_EVENT(ext4_writepage, -	TP_PROTO(struct inode *inode, struct page *page), - -	TP_ARGS(inode, page), - -	TP_STRUCT__entry( -		__field(	int,   dev_major                ) -		__field(	int,   dev_minor                ) -		__field(	ino_t,	ino			) -		__field(	pgoff_t, index			) - -	), - -	TP_fast_assign( -		__entry->dev_major = 
MAJOR(inode->i_sb->s_dev); -		__entry->dev_minor = MINOR(inode->i_sb->s_dev); -		__entry->ino	= inode->i_ino; -		__entry->index	= page->index; -	), - -	TP_printk("dev %d,%d ino %lu page_index %lu", -		  __entry->dev_major, __entry->dev_minor, -		  (unsigned long) __entry->ino, __entry->index) -); - -TRACE_EVENT(ext4_da_writepages, +TRACE_EVENT(ext4_writepages,  	TP_PROTO(struct inode *inode, struct writeback_control *wbc),  	TP_ARGS(inode, wbc),  	TP_STRUCT__entry( -		__field(	int,   dev_major                ) -		__field(	int,   dev_minor                ) +		__field(	dev_t,	dev			)  		__field(	ino_t,	ino			)  		__field(	long,	nr_to_write		)  		__field(	long,	pages_skipped		)  		__field(	loff_t,	range_start		)  		__field(	loff_t,	range_end		) +		__field(       pgoff_t,	writeback_index		) +		__field(	int,	sync_mode		)  		__field(	char,	for_kupdate		) -		__field(	char,	for_reclaim		)  		__field(	char,	range_cyclic		) -		__field(       pgoff_t,	writeback_index		)  	),  	TP_fast_assign( -		__entry->dev_major	= MAJOR(inode->i_sb->s_dev); -		__entry->dev_minor	= MINOR(inode->i_sb->s_dev); +		__entry->dev		= inode->i_sb->s_dev;  		__entry->ino		= inode->i_ino;  		__entry->nr_to_write	= wbc->nr_to_write;  		__entry->pages_skipped	= wbc->pages_skipped;  		__entry->range_start	= wbc->range_start;  		__entry->range_end	= wbc->range_end; +		__entry->writeback_index = inode->i_mapping->writeback_index; +		__entry->sync_mode	= wbc->sync_mode;  		__entry->for_kupdate	= wbc->for_kupdate; -		__entry->for_reclaim	= wbc->for_reclaim;  		__entry->range_cyclic	= wbc->range_cyclic; -		__entry->writeback_index = inode->i_mapping->writeback_index;  	),  	TP_printk("dev %d,%d ino %lu nr_to_write %ld pages_skipped %ld " -		  "range_start %llu range_end %llu " -		  "for_kupdate %d for_reclaim %d " -		  "range_cyclic %d writeback_index %lu", -		  __entry->dev_major, __entry->dev_minor, +		  "range_start %lld range_end %lld sync_mode %d " +		  "for_kupdate %d range_cyclic %d writeback_index 
%lu", +		  MAJOR(__entry->dev), MINOR(__entry->dev),  		  (unsigned long) __entry->ino, __entry->nr_to_write,  		  __entry->pages_skipped, __entry->range_start, -		  __entry->range_end, -		  __entry->for_kupdate, __entry->for_reclaim, -		  __entry->range_cyclic, +		  __entry->range_end, __entry->sync_mode, +		  __entry->for_kupdate, __entry->range_cyclic,  		  (unsigned long) __entry->writeback_index)  );  TRACE_EVENT(ext4_da_write_pages, -	TP_PROTO(struct inode *inode, struct mpage_da_data *mpd), +	TP_PROTO(struct inode *inode, pgoff_t first_page, +		 struct writeback_control *wbc), -	TP_ARGS(inode, mpd), +	TP_ARGS(inode, first_page, wbc),  	TP_STRUCT__entry( -		__field(	int,   dev_major                ) -		__field(	int,   dev_minor                ) +		__field(	dev_t,	dev			)  		__field(	ino_t,	ino			) -		__field(	__u64,	b_blocknr		) -		__field(	__u32,	b_size			) -		__field(	__u32,	b_state			) -		__field(	unsigned long,	first_page	) -		__field(	int,	io_done			) -		__field(	int,	pages_written		) +		__field(      pgoff_t,	first_page		) +		__field(	 long,	nr_to_write		) +		__field(	  int,	sync_mode		)  	),  	TP_fast_assign( -		__entry->dev_major	= MAJOR(inode->i_sb->s_dev); -		__entry->dev_minor	= MINOR(inode->i_sb->s_dev); +		__entry->dev		= inode->i_sb->s_dev;  		__entry->ino		= inode->i_ino; -		__entry->b_blocknr	= mpd->b_blocknr; -		__entry->b_size		= mpd->b_size; -		__entry->b_state	= mpd->b_state; -		__entry->first_page	= mpd->first_page; -		__entry->io_done	= mpd->io_done; -		__entry->pages_written	= mpd->pages_written; +		__entry->first_page	= first_page; +		__entry->nr_to_write	= wbc->nr_to_write; +		__entry->sync_mode	= wbc->sync_mode;  	), -	TP_printk("dev %d,%d ino %lu b_blocknr %llu b_size %u b_state 0x%04x first_page %lu io_done %d pages_written %d", -		  __entry->dev_major, __entry->dev_minor, -		  (unsigned long) __entry->ino, -		  __entry->b_blocknr, __entry->b_size, -		  __entry->b_state, __entry->first_page, -		  __entry->io_done, 
__entry->pages_written) +	TP_printk("dev %d,%d ino %lu first_page %lu nr_to_write %ld " +		  "sync_mode %d", +		  MAJOR(__entry->dev), MINOR(__entry->dev), +		  (unsigned long) __entry->ino, __entry->first_page, +		  __entry->nr_to_write, __entry->sync_mode) +); + +TRACE_EVENT(ext4_da_write_pages_extent, +	TP_PROTO(struct inode *inode, struct ext4_map_blocks *map), + +	TP_ARGS(inode, map), + +	TP_STRUCT__entry( +		__field(	dev_t,	dev			) +		__field(	ino_t,	ino			) +		__field(	__u64,	lblk			) +		__field(	__u32,	len			) +		__field(	__u32,	flags			) +	), + +	TP_fast_assign( +		__entry->dev		= inode->i_sb->s_dev; +		__entry->ino		= inode->i_ino; +		__entry->lblk		= map->m_lblk; +		__entry->len		= map->m_len; +		__entry->flags		= map->m_flags; +	), + +	TP_printk("dev %d,%d ino %lu lblk %llu len %u flags %s", +		  MAJOR(__entry->dev), MINOR(__entry->dev), +		  (unsigned long) __entry->ino, __entry->lblk, __entry->len, +		  show_mflags(__entry->flags))  ); -TRACE_EVENT(ext4_da_writepages_result, +TRACE_EVENT(ext4_writepages_result,  	TP_PROTO(struct inode *inode, struct writeback_control *wbc,  			int ret, int pages_written),  	TP_ARGS(inode, wbc, ret, pages_written),  	TP_STRUCT__entry( -		__field(	int,   dev_major                ) -		__field(	int,   dev_minor                ) +		__field(	dev_t,	dev			)  		__field(	ino_t,	ino			)  		__field(	int,	ret			)  		__field(	int,	pages_written		)  		__field(	long,	pages_skipped		) -		__field(	char,	more_io			)	  		__field(       pgoff_t,	writeback_index		) +		__field(	int,	sync_mode		)  	),  	TP_fast_assign( -		__entry->dev_major	= MAJOR(inode->i_sb->s_dev); -		__entry->dev_minor	= MINOR(inode->i_sb->s_dev); +		__entry->dev		= inode->i_sb->s_dev;  		__entry->ino		= inode->i_ino;  		__entry->ret		= ret;  		__entry->pages_written	= pages_written;  		__entry->pages_skipped	= wbc->pages_skipped; -		__entry->more_io	= wbc->more_io;  		__entry->writeback_index = inode->i_mapping->writeback_index; +		__entry->sync_mode	= wbc->sync_mode; 
 	), -	TP_printk("dev %d,%d ino %lu ret %d pages_written %d pages_skipped %ld more_io %d writeback_index %lu", -		  __entry->dev_major, __entry->dev_minor, +	TP_printk("dev %d,%d ino %lu ret %d pages_written %d pages_skipped %ld " +		  "sync_mode %d writeback_index %lu", +		  MAJOR(__entry->dev), MINOR(__entry->dev),  		  (unsigned long) __entry->ino, __entry->ret,  		  __entry->pages_written, __entry->pages_skipped, -		  __entry->more_io, +		  __entry->sync_mode,  		  (unsigned long) __entry->writeback_index)  ); +DECLARE_EVENT_CLASS(ext4__page_op, +	TP_PROTO(struct page *page), + +	TP_ARGS(page), + +	TP_STRUCT__entry( +		__field(	dev_t,	dev			) +		__field(	ino_t,	ino			) +		__field(	pgoff_t, index			) + +	), + +	TP_fast_assign( +		__entry->dev	= page->mapping->host->i_sb->s_dev; +		__entry->ino	= page->mapping->host->i_ino; +		__entry->index	= page->index; +	), + +	TP_printk("dev %d,%d ino %lu page_index %lu", +		  MAJOR(__entry->dev), MINOR(__entry->dev), +		  (unsigned long) __entry->ino, +		  (unsigned long) __entry->index) +); + +DEFINE_EVENT(ext4__page_op, ext4_writepage, + +	TP_PROTO(struct page *page), + +	TP_ARGS(page) +); + +DEFINE_EVENT(ext4__page_op, ext4_readpage, + +	TP_PROTO(struct page *page), + +	TP_ARGS(page) +); + +DEFINE_EVENT(ext4__page_op, ext4_releasepage, + +	TP_PROTO(struct page *page), + +	TP_ARGS(page) +); + +DECLARE_EVENT_CLASS(ext4_invalidatepage_op, +	TP_PROTO(struct page *page, unsigned int offset, unsigned int length), + +	TP_ARGS(page, offset, length), + +	TP_STRUCT__entry( +		__field(	dev_t,	dev			) +		__field(	ino_t,	ino			) +		__field(	pgoff_t, index			) +		__field(	unsigned int, offset		) +		__field(	unsigned int, length		) +	), + +	TP_fast_assign( +		__entry->dev	= page->mapping->host->i_sb->s_dev; +		__entry->ino	= page->mapping->host->i_ino; +		__entry->index	= page->index; +		__entry->offset	= offset; +		__entry->length	= length; +	), + +	TP_printk("dev %d,%d ino %lu page_index %lu offset %u length %u", +		  
MAJOR(__entry->dev), MINOR(__entry->dev), +		  (unsigned long) __entry->ino, +		  (unsigned long) __entry->index, +		  __entry->offset, __entry->length) +); + +DEFINE_EVENT(ext4_invalidatepage_op, ext4_invalidatepage, +	TP_PROTO(struct page *page, unsigned int offset, unsigned int length), + +	TP_ARGS(page, offset, length) +); + +DEFINE_EVENT(ext4_invalidatepage_op, ext4_journalled_invalidatepage, +	TP_PROTO(struct page *page, unsigned int offset, unsigned int length), + +	TP_ARGS(page, offset, length) +); +  TRACE_EVENT(ext4_discard_blocks,  	TP_PROTO(struct super_block *sb, unsigned long long blk,  			unsigned long long count), @@ -455,22 +562,20 @@ TRACE_EVENT(ext4_discard_blocks,  	TP_ARGS(sb, blk, count),  	TP_STRUCT__entry( -		__field(	int,   dev_major                ) -		__field(	int,   dev_minor                ) +		__field(	dev_t,	dev			)  		__field(	__u64,	blk			)  		__field(	__u64,	count			)  	),  	TP_fast_assign( -		__entry->dev_major = MAJOR(sb->s_dev); -		__entry->dev_minor = MINOR(sb->s_dev); +		__entry->dev	= sb->s_dev;  		__entry->blk	= blk;  		__entry->count	= count;  	),  	TP_printk("dev %d,%d blk %llu count %llu", -		  __entry->dev_major, __entry->dev_minor, +		  MAJOR(__entry->dev), MINOR(__entry->dev),  		  __entry->blk, __entry->count)  ); @@ -481,28 +586,26 @@ DECLARE_EVENT_CLASS(ext4__mb_new_pa,  	TP_ARGS(ac, pa),  	TP_STRUCT__entry( -		__field(	int,   dev_major                ) -		__field(	int,   dev_minor                ) +		__field(	dev_t,	dev			)  		__field(	ino_t,	ino			)  		__field(	__u64,	pa_pstart		) -		__field(	__u32,	pa_len			)  		__field(	__u64,	pa_lstart		) +		__field(	__u32,	pa_len			)  	),  	TP_fast_assign( -		__entry->dev_major	= MAJOR(ac->ac_sb->s_dev); -		__entry->dev_minor	= MINOR(ac->ac_sb->s_dev); +		__entry->dev		= ac->ac_sb->s_dev;  		__entry->ino		= ac->ac_inode->i_ino;  		__entry->pa_pstart	= pa->pa_pstart; -		__entry->pa_len		= pa->pa_len;  		__entry->pa_lstart	= pa->pa_lstart; +		__entry->pa_len		= pa->pa_len;  	),  
	TP_printk("dev %d,%d ino %lu pstart %llu len %u lstart %llu", -		  __entry->dev_major, __entry->dev_minor, -		  (unsigned long) __entry->ino, __entry->pa_pstart, -		  __entry->pa_len, __entry->pa_lstart) +		  MAJOR(__entry->dev), MINOR(__entry->dev), +		  (unsigned long) __entry->ino, +		  __entry->pa_pstart, __entry->pa_len, __entry->pa_lstart)  );  DEFINE_EVENT(ext4__mb_new_pa, ext4_mb_new_inode_pa, @@ -522,16 +625,13 @@ DEFINE_EVENT(ext4__mb_new_pa, ext4_mb_new_group_pa,  );  TRACE_EVENT(ext4_mb_release_inode_pa, -	TP_PROTO(struct super_block *sb, -		 struct inode *inode, -		 struct ext4_prealloc_space *pa, +	TP_PROTO(struct ext4_prealloc_space *pa,  		 unsigned long long block, unsigned int count), -	TP_ARGS(sb, inode, pa, block, count), +	TP_ARGS(pa, block, count),  	TP_STRUCT__entry( -		__field(	int,   dev_major                ) -		__field(	int,   dev_minor                ) +		__field(	dev_t,	dev			)  		__field(	ino_t,	ino			)  		__field(	__u64,	block			)  		__field(	__u32,	count			) @@ -539,41 +639,38 @@ TRACE_EVENT(ext4_mb_release_inode_pa,  	),  	TP_fast_assign( -		__entry->dev_major	= MAJOR(sb->s_dev); -		__entry->dev_minor	= MINOR(sb->s_dev); -		__entry->ino		= inode->i_ino; +		__entry->dev		= pa->pa_inode->i_sb->s_dev; +		__entry->ino		= pa->pa_inode->i_ino;  		__entry->block		= block;  		__entry->count		= count;  	),  	TP_printk("dev %d,%d ino %lu block %llu count %u", -		  __entry->dev_major, __entry->dev_minor, -		  (unsigned long) __entry->ino, __entry->block, __entry->count) +		  MAJOR(__entry->dev), MINOR(__entry->dev), +		  (unsigned long) __entry->ino, +		  __entry->block, __entry->count)  );  TRACE_EVENT(ext4_mb_release_group_pa, -	TP_PROTO(struct super_block *sb, -		 struct ext4_prealloc_space *pa), +	TP_PROTO(struct super_block *sb, struct ext4_prealloc_space *pa),  	TP_ARGS(sb, pa),  	TP_STRUCT__entry( -		__field(	int,   dev_major                ) -		__field(	int,   dev_minor                ) +		__field(	dev_t,	dev			)  		__field(	__u64,	
pa_pstart		)  		__field(	__u32,	pa_len			)  	),  	TP_fast_assign( -		__entry->dev_major	= MAJOR(sb->s_dev); -		__entry->dev_minor	= MINOR(sb->s_dev); +		__entry->dev		= sb->s_dev;  		__entry->pa_pstart	= pa->pa_pstart;  		__entry->pa_len		= pa->pa_len;  	),  	TP_printk("dev %d,%d pstart %llu len %u", -		  __entry->dev_major, __entry->dev_minor, +		  MAJOR(__entry->dev), MINOR(__entry->dev),  		  __entry->pa_pstart, __entry->pa_len)  ); @@ -583,20 +680,18 @@ TRACE_EVENT(ext4_discard_preallocations,  	TP_ARGS(inode),  	TP_STRUCT__entry( -		__field(	int,   dev_major                ) -		__field(	int,   dev_minor                ) +		__field(	dev_t,	dev			)  		__field(	ino_t,	ino			)  	),  	TP_fast_assign( -		__entry->dev_major = MAJOR(inode->i_sb->s_dev); -		__entry->dev_minor = MINOR(inode->i_sb->s_dev); +		__entry->dev	= inode->i_sb->s_dev;  		__entry->ino	= inode->i_ino;  	),  	TP_printk("dev %d,%d ino %lu", -		  __entry->dev_major, __entry->dev_minor, +		  MAJOR(__entry->dev), MINOR(__entry->dev),  		  (unsigned long) __entry->ino)  ); @@ -606,20 +701,19 @@ TRACE_EVENT(ext4_mb_discard_preallocations,  	TP_ARGS(sb, needed),  	TP_STRUCT__entry( -		__field(	int,   dev_major                ) -		__field(	int,   dev_minor                ) +		__field(	dev_t,	dev			)  		__field(	int,	needed			)  	),  	TP_fast_assign( -		__entry->dev_major = MAJOR(sb->s_dev); -		__entry->dev_minor = MINOR(sb->s_dev); +		__entry->dev	= sb->s_dev;  		__entry->needed	= needed;  	),  	TP_printk("dev %d,%d needed %d", -		  __entry->dev_major, __entry->dev_minor, __entry->needed) +		  MAJOR(__entry->dev), MINOR(__entry->dev), +		  __entry->needed)  );  TRACE_EVENT(ext4_request_blocks, @@ -628,24 +722,21 @@ TRACE_EVENT(ext4_request_blocks,  	TP_ARGS(ar),  	TP_STRUCT__entry( -		__field(	int,   dev_major                ) -		__field(	int,   dev_minor                ) +		__field(	dev_t,	dev			)  		__field(	ino_t,	ino			) -		__field(	unsigned int, flags		)  		__field(	unsigned int, len		) -		__field(	
__u64,  logical			) +		__field(	__u32,  logical			) +		__field(	__u32,	lleft			) +		__field(	__u32,	lright			)  		__field(	__u64,	goal			) -		__field(	__u64,	lleft			) -		__field(	__u64,	lright			)  		__field(	__u64,	pleft			)  		__field(	__u64,	pright			) +		__field(	unsigned int, flags		)  	),  	TP_fast_assign( -		__entry->dev_major = MAJOR(ar->inode->i_sb->s_dev); -		__entry->dev_minor = MINOR(ar->inode->i_sb->s_dev); +		__entry->dev	= ar->inode->i_sb->s_dev;  		__entry->ino	= ar->inode->i_ino; -		__entry->flags	= ar->flags;  		__entry->len	= ar->len;  		__entry->logical = ar->logical;  		__entry->goal	= ar->goal; @@ -653,18 +744,16 @@ TRACE_EVENT(ext4_request_blocks,  		__entry->lright	= ar->lright;  		__entry->pleft	= ar->pleft;  		__entry->pright	= ar->pright; +		__entry->flags	= ar->flags;  	), -	TP_printk("dev %d,%d ino %lu flags %u len %u lblk %llu goal %llu lleft %llu lright %llu pleft %llu pright %llu ", -		  __entry->dev_major, __entry->dev_minor, -		  (unsigned long) __entry->ino, -		  __entry->flags, __entry->len, -		  (unsigned long long) __entry->logical, -		  (unsigned long long) __entry->goal, -		  (unsigned long long) __entry->lleft, -		  (unsigned long long) __entry->lright, -		  (unsigned long long) __entry->pleft, -		  (unsigned long long) __entry->pright) +	TP_printk("dev %d,%d ino %lu flags %s len %u lblk %u goal %llu " +		  "lleft %u lright %u pleft %llu pright %llu ", +		  MAJOR(__entry->dev), MINOR(__entry->dev), +		  (unsigned long) __entry->ino, show_mballoc_flags(__entry->flags), +		  __entry->len, __entry->logical, __entry->goal, +		  __entry->lleft, __entry->lright, __entry->pleft, +		  __entry->pright)  );  TRACE_EVENT(ext4_allocate_blocks, @@ -673,26 +762,23 @@ TRACE_EVENT(ext4_allocate_blocks,  	TP_ARGS(ar, block),  	TP_STRUCT__entry( -		__field(	int,   dev_major                ) -		__field(	int,   dev_minor                ) +		__field(	dev_t,	dev			)  		__field(	ino_t,	ino			)  		__field(	__u64,	block			) -		__field(	unsigned 
int, flags		)  		__field(	unsigned int, len		) -		__field(	__u64,  logical			) +		__field(	__u32,  logical			) +		__field(	__u32,	lleft			) +		__field(	__u32,	lright			)  		__field(	__u64,	goal			) -		__field(	__u64,	lleft			) -		__field(	__u64,	lright			)  		__field(	__u64,	pleft			)  		__field(	__u64,	pright			) +		__field(	unsigned int, flags		)  	),  	TP_fast_assign( -		__entry->dev_major = MAJOR(ar->inode->i_sb->s_dev); -		__entry->dev_minor = MINOR(ar->inode->i_sb->s_dev); +		__entry->dev	= ar->inode->i_sb->s_dev;  		__entry->ino	= ar->inode->i_ino;  		__entry->block	= block; -		__entry->flags	= ar->flags;  		__entry->len	= ar->len;  		__entry->logical = ar->logical;  		__entry->goal	= ar->goal; @@ -700,18 +786,16 @@ TRACE_EVENT(ext4_allocate_blocks,  		__entry->lright	= ar->lright;  		__entry->pleft	= ar->pleft;  		__entry->pright	= ar->pright; +		__entry->flags	= ar->flags;  	), -	TP_printk("dev %d,%d ino %lu flags %u len %u block %llu lblk %llu goal %llu lleft %llu lright %llu pleft %llu pright %llu ", -		  __entry->dev_major, __entry->dev_minor, -		  (unsigned long) __entry->ino, __entry->flags, -		  __entry->len, __entry->block, -		  (unsigned long long) __entry->logical, -		  (unsigned long long) __entry->goal, -		  (unsigned long long) __entry->lleft, -		  (unsigned long long) __entry->lright, -		  (unsigned long long) __entry->pleft, -		  (unsigned long long) __entry->pright) +	TP_printk("dev %d,%d ino %lu flags %s len %u block %llu lblk %u " +		  "goal %llu lleft %u lright %u pleft %llu pright %llu", +		  MAJOR(__entry->dev), MINOR(__entry->dev), +		  (unsigned long) __entry->ino, show_mballoc_flags(__entry->flags), +		  __entry->len, __entry->block, __entry->logical, +		  __entry->goal,  __entry->lleft, __entry->lright, +		  __entry->pleft, __entry->pright)  );  TRACE_EVENT(ext4_free_blocks, @@ -721,40 +805,37 @@ TRACE_EVENT(ext4_free_blocks,  	TP_ARGS(inode, block, count, flags),  	TP_STRUCT__entry( -		__field(	int,   dev_major                ) -		
__field(	int,   dev_minor                ) +		__field(	dev_t,	dev			)  		__field(	ino_t,	ino			) -		__field(      umode_t, mode			)  		__field(	__u64,	block			)  		__field(	unsigned long,	count		) -		__field(	 int,	flags			) +		__field(	int,	flags			) +		__field(	__u16,	mode			)  	),  	TP_fast_assign( -		__entry->dev_major	= MAJOR(inode->i_sb->s_dev); -		__entry->dev_minor	= MINOR(inode->i_sb->s_dev); +		__entry->dev		= inode->i_sb->s_dev;  		__entry->ino		= inode->i_ino; -		__entry->mode		= inode->i_mode;  		__entry->block		= block;  		__entry->count		= count;  		__entry->flags		= flags; +		__entry->mode		= inode->i_mode;  	), -	TP_printk("dev %d,%d ino %lu mode 0%o block %llu count %lu flags %d", -		  __entry->dev_major, __entry->dev_minor, +	TP_printk("dev %d,%d ino %lu mode 0%o block %llu count %lu flags %s", +		  MAJOR(__entry->dev), MINOR(__entry->dev),  		  (unsigned long) __entry->ino,  		  __entry->mode, __entry->block, __entry->count, -		  __entry->flags) +		  show_free_flags(__entry->flags))  ); -TRACE_EVENT(ext4_sync_file, +TRACE_EVENT(ext4_sync_file_enter,  	TP_PROTO(struct file *file, int datasync),  	TP_ARGS(file, datasync),  	TP_STRUCT__entry( -		__field(	int,   dev_major                ) -		__field(	int,   dev_minor                ) +		__field(	dev_t,	dev			)  		__field(	ino_t,	ino			)  		__field(	ino_t,	parent			)  		__field(	int,	datasync		) @@ -763,39 +844,60 @@ TRACE_EVENT(ext4_sync_file,  	TP_fast_assign(  		struct dentry *dentry = file->f_path.dentry; -		__entry->dev_major	= MAJOR(dentry->d_inode->i_sb->s_dev); -		__entry->dev_minor	= MINOR(dentry->d_inode->i_sb->s_dev); +		__entry->dev		= dentry->d_inode->i_sb->s_dev;  		__entry->ino		= dentry->d_inode->i_ino;  		__entry->datasync	= datasync;  		__entry->parent		= dentry->d_parent->d_inode->i_ino;  	), -	TP_printk("dev %d,%d ino %ld parent %ld datasync %d ", -		  __entry->dev_major, __entry->dev_minor, +	TP_printk("dev %d,%d ino %lu parent %lu datasync %d ", +		  MAJOR(__entry->dev), 
MINOR(__entry->dev),  		  (unsigned long) __entry->ino,  		  (unsigned long) __entry->parent, __entry->datasync)  ); +TRACE_EVENT(ext4_sync_file_exit, +	TP_PROTO(struct inode *inode, int ret), + +	TP_ARGS(inode, ret), + +	TP_STRUCT__entry( +		__field(	dev_t,	dev			) +		__field(	ino_t,	ino			) +		__field(	int,	ret			) +	), + +	TP_fast_assign( +		__entry->dev		= inode->i_sb->s_dev; +		__entry->ino		= inode->i_ino; +		__entry->ret		= ret; +	), + +	TP_printk("dev %d,%d ino %lu ret %d", +		  MAJOR(__entry->dev), MINOR(__entry->dev), +		  (unsigned long) __entry->ino, +		  __entry->ret) +); +  TRACE_EVENT(ext4_sync_fs,  	TP_PROTO(struct super_block *sb, int wait),  	TP_ARGS(sb, wait),  	TP_STRUCT__entry( -		__field(	int,   dev_major                ) -		__field(	int,   dev_minor                ) +		__field(	dev_t,	dev			)  		__field(	int,	wait			)  	),  	TP_fast_assign( -		__entry->dev_major = MAJOR(sb->s_dev); -		__entry->dev_minor = MINOR(sb->s_dev); +		__entry->dev	= sb->s_dev;  		__entry->wait	= wait;  	), -	TP_printk("dev %d,%d wait %d", __entry->dev_major, -		  __entry->dev_minor, __entry->wait) +	TP_printk("dev %d,%d wait %d", +		  MAJOR(__entry->dev), MINOR(__entry->dev), +		  __entry->wait)  );  TRACE_EVENT(ext4_alloc_da_blocks, @@ -804,23 +906,21 @@ TRACE_EVENT(ext4_alloc_da_blocks,  	TP_ARGS(inode),  	TP_STRUCT__entry( -		__field(	int,   dev_major                ) -		__field(	int,   dev_minor                ) +		__field(	dev_t,	dev			)  		__field(	ino_t,	ino			)  		__field( unsigned int,	data_blocks	)  		__field( unsigned int,	meta_blocks	)  	),  	TP_fast_assign( -		__entry->dev_major = MAJOR(inode->i_sb->s_dev); -		__entry->dev_minor = MINOR(inode->i_sb->s_dev); +		__entry->dev	= inode->i_sb->s_dev;  		__entry->ino	= inode->i_ino;  		__entry->data_blocks = EXT4_I(inode)->i_reserved_data_blocks;  		__entry->meta_blocks = EXT4_I(inode)->i_reserved_meta_blocks;  	),  	TP_printk("dev %d,%d ino %lu data_blocks %u meta_blocks %u", -		  __entry->dev_major, 
__entry->dev_minor, +		  MAJOR(__entry->dev), MINOR(__entry->dev),  		  (unsigned long) __entry->ino,  		  __entry->data_blocks, __entry->meta_blocks)  ); @@ -831,15 +931,8 @@ TRACE_EVENT(ext4_mballoc_alloc,  	TP_ARGS(ac),  	TP_STRUCT__entry( -		__field(	int,   dev_major                ) -		__field(	int,   dev_minor                ) +		__field(	dev_t,	dev			)  		__field(	ino_t,	ino			) -		__field(	__u16,	found			) -		__field(	__u16,	groups			) -		__field(	__u16,	buddy			) -		__field(	__u16,	flags			) -		__field(	__u16,	tail			) -		__field(	__u8,	cr			)  		__field(	__u32, 	orig_logical		)  		__field(	  int,	orig_start		)  		__field(	__u32, 	orig_group		) @@ -852,18 +945,17 @@ TRACE_EVENT(ext4_mballoc_alloc,  		__field(	  int,	result_start		)  		__field(	__u32, 	result_group		)  		__field(	  int,	result_len		) +		__field(	__u16,	found			) +		__field(	__u16,	groups			) +		__field(	__u16,	buddy			) +		__field(	__u16,	flags			) +		__field(	__u16,	tail			) +		__field(	__u8,	cr			)  	),  	TP_fast_assign( -		__entry->dev_major	= MAJOR(ac->ac_inode->i_sb->s_dev); -		__entry->dev_minor	= MINOR(ac->ac_inode->i_sb->s_dev); +		__entry->dev		= ac->ac_inode->i_sb->s_dev;  		__entry->ino		= ac->ac_inode->i_ino; -		__entry->found		= ac->ac_found; -		__entry->flags		= ac->ac_flags; -		__entry->groups		= ac->ac_groups_scanned; -		__entry->buddy		= ac->ac_buddy; -		__entry->tail		= ac->ac_tail; -		__entry->cr		= ac->ac_criteria;  		__entry->orig_logical	= ac->ac_o_ex.fe_logical;  		__entry->orig_start	= ac->ac_o_ex.fe_start;  		__entry->orig_group	= ac->ac_o_ex.fe_group; @@ -876,12 +968,18 @@ TRACE_EVENT(ext4_mballoc_alloc,  		__entry->result_start	= ac->ac_f_ex.fe_start;  		__entry->result_group	= ac->ac_f_ex.fe_group;  		__entry->result_len	= ac->ac_f_ex.fe_len; +		__entry->found		= ac->ac_found; +		__entry->flags		= ac->ac_flags; +		__entry->groups		= ac->ac_groups_scanned; +		__entry->buddy		= ac->ac_buddy; +		__entry->tail		= ac->ac_tail; +		__entry->cr		= ac->ac_criteria;  	),  	
TP_printk("dev %d,%d inode %lu orig %u/%d/%u@%u goal %u/%d/%u@%u " -		  "result %u/%d/%u@%u blks %u grps %u cr %u flags 0x%04x " +		  "result %u/%d/%u@%u blks %u grps %u cr %u flags %s "  		  "tail %u broken %u", -		  __entry->dev_major, __entry->dev_minor, +		  MAJOR(__entry->dev), MINOR(__entry->dev),  		  (unsigned long) __entry->ino,  		  __entry->orig_group, __entry->orig_start,  		  __entry->orig_len, __entry->orig_logical, @@ -890,7 +988,7 @@ TRACE_EVENT(ext4_mballoc_alloc,  		  __entry->result_group, __entry->result_start,  		  __entry->result_len, __entry->result_logical,  		  __entry->found, __entry->groups, __entry->cr, -		  __entry->flags, __entry->tail, +		  show_mballoc_flags(__entry->flags), __entry->tail,  		  __entry->buddy ? 1 << __entry->buddy : 0)  ); @@ -900,8 +998,7 @@ TRACE_EVENT(ext4_mballoc_prealloc,  	TP_ARGS(ac),  	TP_STRUCT__entry( -		__field(	int,   dev_major                ) -		__field(	int,   dev_minor                ) +		__field(	dev_t,	dev			)  		__field(	ino_t,	ino			)  		__field(	__u32, 	orig_logical		)  		__field(	  int,	orig_start		) @@ -914,8 +1011,7 @@ TRACE_EVENT(ext4_mballoc_prealloc,  	),  	TP_fast_assign( -		__entry->dev_major	= MAJOR(ac->ac_inode->i_sb->s_dev); -		__entry->dev_minor	= MINOR(ac->ac_inode->i_sb->s_dev); +		__entry->dev		= ac->ac_inode->i_sb->s_dev;  		__entry->ino		= ac->ac_inode->i_ino;  		__entry->orig_logical	= ac->ac_o_ex.fe_logical;  		__entry->orig_start	= ac->ac_o_ex.fe_start; @@ -928,7 +1024,7 @@ TRACE_EVENT(ext4_mballoc_prealloc,  	),  	TP_printk("dev %d,%d inode %lu orig %u/%d/%u@%u result %u/%d/%u@%u", -		  __entry->dev_major, __entry->dev_minor, +		  MAJOR(__entry->dev), MINOR(__entry->dev),  		  (unsigned long) __entry->ino,  		  __entry->orig_group, __entry->orig_start,  		  __entry->orig_len, __entry->orig_logical, @@ -946,8 +1042,7 @@ DECLARE_EVENT_CLASS(ext4__mballoc,  	TP_ARGS(sb, inode, group, start, len),  	TP_STRUCT__entry( -		__field(	int,   dev_major                ) -		__field(	int,  
 dev_minor                ) +		__field(	dev_t,	dev			)  		__field(	ino_t,	ino			)  		__field(	  int,	result_start		)  		__field(	__u32, 	result_group		) @@ -955,16 +1050,15 @@ DECLARE_EVENT_CLASS(ext4__mballoc,  	),  	TP_fast_assign( -		__entry->dev_major	= MAJOR(sb->s_dev); -		__entry->dev_minor	= MINOR(sb->s_dev); +		__entry->dev		= sb->s_dev;  		__entry->ino		= inode ? inode->i_ino : 0;  		__entry->result_start	= start;  		__entry->result_group	= group;  		__entry->result_len	= len;  	), -	TP_printk("dev %d,%d inode %lu extent %u/%d/%u ", -		  __entry->dev_major, __entry->dev_minor, +	TP_printk("dev %d,%d inode %lu extent %u/%d/%d ", +		  MAJOR(__entry->dev), MINOR(__entry->dev),  		  (unsigned long) __entry->ino,  		  __entry->result_group, __entry->result_start,  		  __entry->result_len) @@ -998,64 +1092,68 @@ TRACE_EVENT(ext4_forget,  	TP_ARGS(inode, is_metadata, block),  	TP_STRUCT__entry( -		__field(	int,   dev_major                ) -		__field(	int,   dev_minor                ) +		__field(	dev_t,	dev			)  		__field(	ino_t,	ino			) -		__field(	umode_t, mode			) -		__field(	int,	is_metadata		)  		__field(	__u64,	block			) +		__field(	int,	is_metadata		) +		__field(	__u16,	mode			)  	),  	TP_fast_assign( -		__entry->dev_major = MAJOR(inode->i_sb->s_dev); -		__entry->dev_minor = MINOR(inode->i_sb->s_dev); +		__entry->dev	= inode->i_sb->s_dev;  		__entry->ino	= inode->i_ino; -		__entry->mode	= inode->i_mode; -		__entry->is_metadata = is_metadata;  		__entry->block	= block; +		__entry->is_metadata = is_metadata; +		__entry->mode	= inode->i_mode;  	),  	TP_printk("dev %d,%d ino %lu mode 0%o is_metadata %d block %llu", -		  __entry->dev_major, __entry->dev_minor, -		  (unsigned long) __entry->ino, __entry->mode, -		  __entry->is_metadata, __entry->block) +		  MAJOR(__entry->dev), MINOR(__entry->dev), +		  (unsigned long) __entry->ino, +		  __entry->mode, __entry->is_metadata, __entry->block)  );  TRACE_EVENT(ext4_da_update_reserve_space, -	TP_PROTO(struct inode 
*inode, int used_blocks), +	TP_PROTO(struct inode *inode, int used_blocks, int quota_claim), -	TP_ARGS(inode, used_blocks), +	TP_ARGS(inode, used_blocks, quota_claim),  	TP_STRUCT__entry( -		__field(	int,   dev_major                ) -		__field(	int,   dev_minor                ) +		__field(	dev_t,	dev			)  		__field(	ino_t,	ino			) -		__field(	umode_t, mode			)  		__field(	__u64,	i_blocks		)  		__field(	int,	used_blocks		)  		__field(	int,	reserved_data_blocks	)  		__field(	int,	reserved_meta_blocks	)  		__field(	int,	allocated_meta_blocks	) +		__field(	int,	quota_claim		) +		__field(	__u16,	mode			)  	),  	TP_fast_assign( -		__entry->dev_major = MAJOR(inode->i_sb->s_dev); -		__entry->dev_minor = MINOR(inode->i_sb->s_dev); +		__entry->dev	= inode->i_sb->s_dev;  		__entry->ino	= inode->i_ino; -		__entry->mode	= inode->i_mode;  		__entry->i_blocks = inode->i_blocks;  		__entry->used_blocks = used_blocks; -		__entry->reserved_data_blocks = EXT4_I(inode)->i_reserved_data_blocks; -		__entry->reserved_meta_blocks = EXT4_I(inode)->i_reserved_meta_blocks; -		__entry->allocated_meta_blocks = EXT4_I(inode)->i_allocated_meta_blocks; +		__entry->reserved_data_blocks = +				EXT4_I(inode)->i_reserved_data_blocks; +		__entry->reserved_meta_blocks = +				EXT4_I(inode)->i_reserved_meta_blocks; +		__entry->allocated_meta_blocks = +				EXT4_I(inode)->i_allocated_meta_blocks; +		__entry->quota_claim = quota_claim; +		__entry->mode	= inode->i_mode;  	), -	TP_printk("dev %d,%d ino %lu mode 0%o i_blocks %llu used_blocks %d reserved_data_blocks %d reserved_meta_blocks %d allocated_meta_blocks %d", -		  __entry->dev_major, __entry->dev_minor, -		  (unsigned long) __entry->ino, __entry->mode, -		  (unsigned long long) __entry->i_blocks, +	TP_printk("dev %d,%d ino %lu mode 0%o i_blocks %llu used_blocks %d " +		  "reserved_data_blocks %d reserved_meta_blocks %d " +		  "allocated_meta_blocks %d quota_claim %d", +		  MAJOR(__entry->dev), MINOR(__entry->dev), +		  (unsigned long) __entry->ino, +	
	  __entry->mode, __entry->i_blocks,  		  __entry->used_blocks, __entry->reserved_data_blocks, -		  __entry->reserved_meta_blocks, __entry->allocated_meta_blocks) +		  __entry->reserved_meta_blocks, __entry->allocated_meta_blocks, +		  __entry->quota_claim)  );  TRACE_EVENT(ext4_da_reserve_space, @@ -1064,31 +1162,30 @@ TRACE_EVENT(ext4_da_reserve_space,  	TP_ARGS(inode, md_needed),  	TP_STRUCT__entry( -		__field(	int,   dev_major                ) -		__field(	int,   dev_minor                ) +		__field(	dev_t,	dev			)  		__field(	ino_t,	ino			) -		__field(	umode_t, mode			)  		__field(	__u64,	i_blocks		)  		__field(	int,	md_needed		)  		__field(	int,	reserved_data_blocks	)  		__field(	int,	reserved_meta_blocks	) +		__field(	__u16,  mode			)  	),  	TP_fast_assign( -		__entry->dev_major = MAJOR(inode->i_sb->s_dev); -		__entry->dev_minor = MINOR(inode->i_sb->s_dev); +		__entry->dev	= inode->i_sb->s_dev;  		__entry->ino	= inode->i_ino; -		__entry->mode	= inode->i_mode;  		__entry->i_blocks = inode->i_blocks;  		__entry->md_needed = md_needed;  		__entry->reserved_data_blocks = EXT4_I(inode)->i_reserved_data_blocks;  		__entry->reserved_meta_blocks = EXT4_I(inode)->i_reserved_meta_blocks; +		__entry->mode	= inode->i_mode;  	), -	TP_printk("dev %d,%d ino %lu mode 0%o i_blocks %llu md_needed %d reserved_data_blocks %d reserved_meta_blocks %d", -		  __entry->dev_major, __entry->dev_minor, +	TP_printk("dev %d,%d ino %lu mode 0%o i_blocks %llu md_needed %d " +		  "reserved_data_blocks %d reserved_meta_blocks %d", +		  MAJOR(__entry->dev), MINOR(__entry->dev),  		  (unsigned long) __entry->ino, -		  __entry->mode, (unsigned long long) __entry->i_blocks, +		  __entry->mode, __entry->i_blocks,  		  __entry->md_needed, __entry->reserved_data_blocks,  		  __entry->reserved_meta_blocks)  ); @@ -1099,33 +1196,33 @@ TRACE_EVENT(ext4_da_release_space,  	TP_ARGS(inode, freed_blocks),  	TP_STRUCT__entry( -		__field(	int,   dev_major                ) -		__field(	int,   dev_minor        
        ) +		__field(	dev_t,	dev			)  		__field(	ino_t,	ino			) -		__field(	umode_t, mode			)  		__field(	__u64,	i_blocks		)  		__field(	int,	freed_blocks		)  		__field(	int,	reserved_data_blocks	)  		__field(	int,	reserved_meta_blocks	)  		__field(	int,	allocated_meta_blocks	) +		__field(	__u16,  mode			)  	),  	TP_fast_assign( -		__entry->dev_major = MAJOR(inode->i_sb->s_dev); -		__entry->dev_minor = MINOR(inode->i_sb->s_dev); +		__entry->dev	= inode->i_sb->s_dev;  		__entry->ino	= inode->i_ino; -		__entry->mode	= inode->i_mode;  		__entry->i_blocks = inode->i_blocks;  		__entry->freed_blocks = freed_blocks;  		__entry->reserved_data_blocks = EXT4_I(inode)->i_reserved_data_blocks;  		__entry->reserved_meta_blocks = EXT4_I(inode)->i_reserved_meta_blocks;  		__entry->allocated_meta_blocks = EXT4_I(inode)->i_allocated_meta_blocks; +		__entry->mode	= inode->i_mode;  	), -	TP_printk("dev %d,%d ino %lu mode 0%o i_blocks %llu freed_blocks %d reserved_data_blocks %d reserved_meta_blocks %d allocated_meta_blocks %d", -		  __entry->dev_major, __entry->dev_minor, +	TP_printk("dev %d,%d ino %lu mode 0%o i_blocks %llu freed_blocks %d " +		  "reserved_data_blocks %d reserved_meta_blocks %d " +		  "allocated_meta_blocks %d", +		  MAJOR(__entry->dev), MINOR(__entry->dev),  		  (unsigned long) __entry->ino, -		  __entry->mode, (unsigned long long) __entry->i_blocks, +		  __entry->mode, __entry->i_blocks,  		  __entry->freed_blocks, __entry->reserved_data_blocks,  		  __entry->reserved_meta_blocks, __entry->allocated_meta_blocks)  ); @@ -1136,20 +1233,19 @@ DECLARE_EVENT_CLASS(ext4__bitmap_load,  	TP_ARGS(sb, group),  	TP_STRUCT__entry( -		__field(	int,   dev_major                ) -		__field(	int,   dev_minor                ) +		__field(	dev_t,	dev			)  		__field(	__u32,	group			)  	),  	TP_fast_assign( -		__entry->dev_major = MAJOR(sb->s_dev); -		__entry->dev_minor = MINOR(sb->s_dev); +		__entry->dev	= sb->s_dev;  		__entry->group	= group;  	),  	TP_printk("dev %d,%d group %u", 
-		  __entry->dev_major, __entry->dev_minor, __entry->group) +		  MAJOR(__entry->dev), MINOR(__entry->dev), +		  __entry->group)  );  DEFINE_EVENT(ext4__bitmap_load, ext4_mb_bitmap_load, @@ -1166,6 +1262,1182 @@ DEFINE_EVENT(ext4__bitmap_load, ext4_mb_buddy_bitmap_load,  	TP_ARGS(sb, group)  ); +DEFINE_EVENT(ext4__bitmap_load, ext4_read_block_bitmap_load, + +	TP_PROTO(struct super_block *sb, unsigned long group), + +	TP_ARGS(sb, group) +); + +DEFINE_EVENT(ext4__bitmap_load, ext4_load_inode_bitmap, + +	TP_PROTO(struct super_block *sb, unsigned long group), + +	TP_ARGS(sb, group) +); + +TRACE_EVENT(ext4_direct_IO_enter, +	TP_PROTO(struct inode *inode, loff_t offset, unsigned long len, int rw), + +	TP_ARGS(inode, offset, len, rw), + +	TP_STRUCT__entry( +		__field(	dev_t,	dev			) +		__field(	ino_t,	ino			) +		__field(	loff_t,	pos			) +		__field(	unsigned long,	len		) +		__field(	int,	rw			) +	), + +	TP_fast_assign( +		__entry->dev	= inode->i_sb->s_dev; +		__entry->ino	= inode->i_ino; +		__entry->pos	= offset; +		__entry->len	= len; +		__entry->rw	= rw; +	), + +	TP_printk("dev %d,%d ino %lu pos %lld len %lu rw %d", +		  MAJOR(__entry->dev), MINOR(__entry->dev), +		  (unsigned long) __entry->ino, +		  __entry->pos, __entry->len, __entry->rw) +); + +TRACE_EVENT(ext4_direct_IO_exit, +	TP_PROTO(struct inode *inode, loff_t offset, unsigned long len, +		 int rw, int ret), + +	TP_ARGS(inode, offset, len, rw, ret), + +	TP_STRUCT__entry( +		__field(	dev_t,	dev			) +		__field(	ino_t,	ino			) +		__field(	loff_t,	pos			) +		__field(	unsigned long,	len		) +		__field(	int,	rw			) +		__field(	int,	ret			) +	), + +	TP_fast_assign( +		__entry->dev	= inode->i_sb->s_dev; +		__entry->ino	= inode->i_ino; +		__entry->pos	= offset; +		__entry->len	= len; +		__entry->rw	= rw; +		__entry->ret	= ret; +	), + +	TP_printk("dev %d,%d ino %lu pos %lld len %lu rw %d ret %d", +		  MAJOR(__entry->dev), MINOR(__entry->dev), +		  (unsigned long) __entry->ino, +		  __entry->pos, __entry->len, +		  
__entry->rw, __entry->ret) +); + +DECLARE_EVENT_CLASS(ext4__fallocate_mode, +	TP_PROTO(struct inode *inode, loff_t offset, loff_t len, int mode), + +	TP_ARGS(inode, offset, len, mode), + +	TP_STRUCT__entry( +		__field(	dev_t,	dev			) +		__field(	ino_t,	ino			) +		__field(	loff_t,	offset			) +		__field(	loff_t, len			) +		__field(	int,	mode			) +	), + +	TP_fast_assign( +		__entry->dev	= inode->i_sb->s_dev; +		__entry->ino	= inode->i_ino; +		__entry->offset	= offset; +		__entry->len	= len; +		__entry->mode	= mode; +	), + +	TP_printk("dev %d,%d ino %lu offset %lld len %lld mode %s", +		  MAJOR(__entry->dev), MINOR(__entry->dev), +		  (unsigned long) __entry->ino, +		  __entry->offset, __entry->len, +		  show_falloc_mode(__entry->mode)) +); + +DEFINE_EVENT(ext4__fallocate_mode, ext4_fallocate_enter, + +	TP_PROTO(struct inode *inode, loff_t offset, loff_t len, int mode), + +	TP_ARGS(inode, offset, len, mode) +); + +DEFINE_EVENT(ext4__fallocate_mode, ext4_punch_hole, + +	TP_PROTO(struct inode *inode, loff_t offset, loff_t len, int mode), + +	TP_ARGS(inode, offset, len, mode) +); + +DEFINE_EVENT(ext4__fallocate_mode, ext4_zero_range, + +	TP_PROTO(struct inode *inode, loff_t offset, loff_t len, int mode), + +	TP_ARGS(inode, offset, len, mode) +); + +TRACE_EVENT(ext4_fallocate_exit, +	TP_PROTO(struct inode *inode, loff_t offset, +		 unsigned int max_blocks, int ret), + +	TP_ARGS(inode, offset, max_blocks, ret), + +	TP_STRUCT__entry( +		__field(	dev_t,	dev			) +		__field(	ino_t,	ino			) +		__field(	loff_t,	pos			) +		__field(	unsigned int,	blocks		) +		__field(	int, 	ret			) +	), + +	TP_fast_assign( +		__entry->dev	= inode->i_sb->s_dev; +		__entry->ino	= inode->i_ino; +		__entry->pos	= offset; +		__entry->blocks	= max_blocks; +		__entry->ret	= ret; +	), + +	TP_printk("dev %d,%d ino %lu pos %lld blocks %u ret %d", +		  MAJOR(__entry->dev), MINOR(__entry->dev), +		  (unsigned long) __entry->ino, +		  __entry->pos, __entry->blocks, +		  __entry->ret) +); + 
+TRACE_EVENT(ext4_unlink_enter, +	TP_PROTO(struct inode *parent, struct dentry *dentry), + +	TP_ARGS(parent, dentry), + +	TP_STRUCT__entry( +		__field(	dev_t,	dev			) +		__field(	ino_t,	ino			) +		__field(	ino_t,	parent			) +		__field(	loff_t,	size			) +	), + +	TP_fast_assign( +		__entry->dev		= dentry->d_inode->i_sb->s_dev; +		__entry->ino		= dentry->d_inode->i_ino; +		__entry->parent		= parent->i_ino; +		__entry->size		= dentry->d_inode->i_size; +	), + +	TP_printk("dev %d,%d ino %lu size %lld parent %lu", +		  MAJOR(__entry->dev), MINOR(__entry->dev), +		  (unsigned long) __entry->ino, __entry->size, +		  (unsigned long) __entry->parent) +); + +TRACE_EVENT(ext4_unlink_exit, +	TP_PROTO(struct dentry *dentry, int ret), + +	TP_ARGS(dentry, ret), + +	TP_STRUCT__entry( +		__field(	dev_t,	dev			) +		__field(	ino_t,	ino			) +		__field(	int,	ret			) +	), + +	TP_fast_assign( +		__entry->dev		= dentry->d_inode->i_sb->s_dev; +		__entry->ino		= dentry->d_inode->i_ino; +		__entry->ret		= ret; +	), + +	TP_printk("dev %d,%d ino %lu ret %d", +		  MAJOR(__entry->dev), MINOR(__entry->dev), +		  (unsigned long) __entry->ino, +		  __entry->ret) +); + +DECLARE_EVENT_CLASS(ext4__truncate, +	TP_PROTO(struct inode *inode), + +	TP_ARGS(inode), + +	TP_STRUCT__entry( +		__field(	dev_t,		dev		) +		__field(	ino_t,		ino		) +		__field(	__u64,		blocks		) +	), + +	TP_fast_assign( +		__entry->dev    = inode->i_sb->s_dev; +		__entry->ino    = inode->i_ino; +		__entry->blocks	= inode->i_blocks; +	), + +	TP_printk("dev %d,%d ino %lu blocks %llu", +		  MAJOR(__entry->dev), MINOR(__entry->dev), +		  (unsigned long) __entry->ino, __entry->blocks) +); + +DEFINE_EVENT(ext4__truncate, ext4_truncate_enter, + +	TP_PROTO(struct inode *inode), + +	TP_ARGS(inode) +); + +DEFINE_EVENT(ext4__truncate, ext4_truncate_exit, + +	TP_PROTO(struct inode *inode), + +	TP_ARGS(inode) +); + +/* 'ux' is the unwritten extent. 
*/ +TRACE_EVENT(ext4_ext_convert_to_initialized_enter, +	TP_PROTO(struct inode *inode, struct ext4_map_blocks *map, +		 struct ext4_extent *ux), + +	TP_ARGS(inode, map, ux), + +	TP_STRUCT__entry( +		__field(	dev_t,		dev	) +		__field(	ino_t,		ino	) +		__field(	ext4_lblk_t,	m_lblk	) +		__field(	unsigned,	m_len	) +		__field(	ext4_lblk_t,	u_lblk	) +		__field(	unsigned,	u_len	) +		__field(	ext4_fsblk_t,	u_pblk	) +	), + +	TP_fast_assign( +		__entry->dev		= inode->i_sb->s_dev; +		__entry->ino		= inode->i_ino; +		__entry->m_lblk		= map->m_lblk; +		__entry->m_len		= map->m_len; +		__entry->u_lblk		= le32_to_cpu(ux->ee_block); +		__entry->u_len		= ext4_ext_get_actual_len(ux); +		__entry->u_pblk		= ext4_ext_pblock(ux); +	), + +	TP_printk("dev %d,%d ino %lu m_lblk %u m_len %u u_lblk %u u_len %u " +		  "u_pblk %llu", +		  MAJOR(__entry->dev), MINOR(__entry->dev), +		  (unsigned long) __entry->ino, +		  __entry->m_lblk, __entry->m_len, +		  __entry->u_lblk, __entry->u_len, __entry->u_pblk) +); + +/* + * 'ux' is the unwritten extent. + * 'ix' is the initialized extent to which blocks are transferred. 
+ */ +TRACE_EVENT(ext4_ext_convert_to_initialized_fastpath, +	TP_PROTO(struct inode *inode, struct ext4_map_blocks *map, +		 struct ext4_extent *ux, struct ext4_extent *ix), + +	TP_ARGS(inode, map, ux, ix), + +	TP_STRUCT__entry( +		__field(	dev_t,		dev	) +		__field(	ino_t,		ino	) +		__field(	ext4_lblk_t,	m_lblk	) +		__field(	unsigned,	m_len	) +		__field(	ext4_lblk_t,	u_lblk	) +		__field(	unsigned,	u_len	) +		__field(	ext4_fsblk_t,	u_pblk	) +		__field(	ext4_lblk_t,	i_lblk	) +		__field(	unsigned,	i_len	) +		__field(	ext4_fsblk_t,	i_pblk	) +	), + +	TP_fast_assign( +		__entry->dev		= inode->i_sb->s_dev; +		__entry->ino		= inode->i_ino; +		__entry->m_lblk		= map->m_lblk; +		__entry->m_len		= map->m_len; +		__entry->u_lblk		= le32_to_cpu(ux->ee_block); +		__entry->u_len		= ext4_ext_get_actual_len(ux); +		__entry->u_pblk		= ext4_ext_pblock(ux); +		__entry->i_lblk		= le32_to_cpu(ix->ee_block); +		__entry->i_len		= ext4_ext_get_actual_len(ix); +		__entry->i_pblk		= ext4_ext_pblock(ix); +	), + +	TP_printk("dev %d,%d ino %lu m_lblk %u m_len %u " +		  "u_lblk %u u_len %u u_pblk %llu " +		  "i_lblk %u i_len %u i_pblk %llu ", +		  MAJOR(__entry->dev), MINOR(__entry->dev), +		  (unsigned long) __entry->ino, +		  __entry->m_lblk, __entry->m_len, +		  __entry->u_lblk, __entry->u_len, __entry->u_pblk, +		  __entry->i_lblk, __entry->i_len, __entry->i_pblk) +); + +DECLARE_EVENT_CLASS(ext4__map_blocks_enter, +	TP_PROTO(struct inode *inode, ext4_lblk_t lblk, +		 unsigned int len, unsigned int flags), + +	TP_ARGS(inode, lblk, len, flags), + +	TP_STRUCT__entry( +		__field(	dev_t,		dev		) +		__field(	ino_t,		ino		) +		__field(	ext4_lblk_t,	lblk		) +		__field(	unsigned int,	len		) +		__field(	unsigned int,	flags		) +	), + +	TP_fast_assign( +		__entry->dev    = inode->i_sb->s_dev; +		__entry->ino    = inode->i_ino; +		__entry->lblk	= lblk; +		__entry->len	= len; +		__entry->flags	= flags; +	), + +	TP_printk("dev %d,%d ino %lu lblk %u len %u flags %s", +		  MAJOR(__entry->dev), 
MINOR(__entry->dev), +		  (unsigned long) __entry->ino, +		  __entry->lblk, __entry->len, show_map_flags(__entry->flags)) +); + +DEFINE_EVENT(ext4__map_blocks_enter, ext4_ext_map_blocks_enter, +	TP_PROTO(struct inode *inode, ext4_lblk_t lblk, +		 unsigned len, unsigned flags), + +	TP_ARGS(inode, lblk, len, flags) +); + +DEFINE_EVENT(ext4__map_blocks_enter, ext4_ind_map_blocks_enter, +	TP_PROTO(struct inode *inode, ext4_lblk_t lblk, +		 unsigned len, unsigned flags), + +	TP_ARGS(inode, lblk, len, flags) +); + +DECLARE_EVENT_CLASS(ext4__map_blocks_exit, +	TP_PROTO(struct inode *inode, unsigned flags, struct ext4_map_blocks *map, +		 int ret), + +	TP_ARGS(inode, flags, map, ret), + +	TP_STRUCT__entry( +		__field(	dev_t,		dev		) +		__field(	ino_t,		ino		) +		__field(	unsigned int,	flags		) +		__field(	ext4_fsblk_t,	pblk		) +		__field(	ext4_lblk_t,	lblk		) +		__field(	unsigned int,	len		) +		__field(	unsigned int,	mflags		) +		__field(	int,		ret		) +	), + +	TP_fast_assign( +		__entry->dev    = inode->i_sb->s_dev; +		__entry->ino    = inode->i_ino; +		__entry->flags	= flags; +		__entry->pblk	= map->m_pblk; +		__entry->lblk	= map->m_lblk; +		__entry->len	= map->m_len; +		__entry->mflags	= map->m_flags; +		__entry->ret	= ret; +	), + +	TP_printk("dev %d,%d ino %lu flags %s lblk %u pblk %llu len %u " +		  "mflags %s ret %d", +		  MAJOR(__entry->dev), MINOR(__entry->dev), +		  (unsigned long) __entry->ino, +		  show_map_flags(__entry->flags), __entry->lblk, __entry->pblk, +		  __entry->len, show_mflags(__entry->mflags), __entry->ret) +); + +DEFINE_EVENT(ext4__map_blocks_exit, ext4_ext_map_blocks_exit, +	TP_PROTO(struct inode *inode, unsigned flags, +		 struct ext4_map_blocks *map, int ret), + +	TP_ARGS(inode, flags, map, ret) +); + +DEFINE_EVENT(ext4__map_blocks_exit, ext4_ind_map_blocks_exit, +	TP_PROTO(struct inode *inode, unsigned flags, +		 struct ext4_map_blocks *map, int ret), + +	TP_ARGS(inode, flags, map, ret) +); + +TRACE_EVENT(ext4_ext_load_extent, +	TP_PROTO(struct 
inode *inode, ext4_lblk_t lblk, ext4_fsblk_t pblk), + +	TP_ARGS(inode, lblk, pblk), + +	TP_STRUCT__entry( +		__field(	dev_t,		dev		) +		__field(	ino_t,		ino		) +		__field(	ext4_fsblk_t,	pblk		) +		__field(	ext4_lblk_t,	lblk		) +	), + +	TP_fast_assign( +		__entry->dev    = inode->i_sb->s_dev; +		__entry->ino    = inode->i_ino; +		__entry->pblk	= pblk; +		__entry->lblk	= lblk; +	), + +	TP_printk("dev %d,%d ino %lu lblk %u pblk %llu", +		  MAJOR(__entry->dev), MINOR(__entry->dev), +		  (unsigned long) __entry->ino, +		  __entry->lblk, __entry->pblk) +); + +TRACE_EVENT(ext4_load_inode, +	TP_PROTO(struct inode *inode), + +	TP_ARGS(inode), + +	TP_STRUCT__entry( +		__field(	dev_t,	dev		) +		__field(	ino_t,	ino		) +	), + +	TP_fast_assign( +		__entry->dev		= inode->i_sb->s_dev; +		__entry->ino		= inode->i_ino; +	), + +	TP_printk("dev %d,%d ino %ld", +		  MAJOR(__entry->dev), MINOR(__entry->dev), +		  (unsigned long) __entry->ino) +); + +TRACE_EVENT(ext4_journal_start, +	TP_PROTO(struct super_block *sb, int blocks, int rsv_blocks, +		 unsigned long IP), + +	TP_ARGS(sb, blocks, rsv_blocks, IP), + +	TP_STRUCT__entry( +		__field(	dev_t,	dev			) +		__field(unsigned long,	ip			) +		__field(	  int,	blocks			) +		__field(	  int,	rsv_blocks		) +	), + +	TP_fast_assign( +		__entry->dev		 = sb->s_dev; +		__entry->ip		 = IP; +		__entry->blocks		 = blocks; +		__entry->rsv_blocks	 = rsv_blocks; +	), + +	TP_printk("dev %d,%d blocks, %d rsv_blocks, %d caller %pF", +		  MAJOR(__entry->dev), MINOR(__entry->dev), +		  __entry->blocks, __entry->rsv_blocks, (void *)__entry->ip) +); + +TRACE_EVENT(ext4_journal_start_reserved, +	TP_PROTO(struct super_block *sb, int blocks, unsigned long IP), + +	TP_ARGS(sb, blocks, IP), + +	TP_STRUCT__entry( +		__field(	dev_t,	dev			) +		__field(unsigned long,	ip			) +		__field(	  int,	blocks			) +	), + +	TP_fast_assign( +		__entry->dev		 = sb->s_dev; +		__entry->ip		 = IP; +		__entry->blocks		 = blocks; +	), + +	TP_printk("dev %d,%d blocks, %d caller %pF", +		  
MAJOR(__entry->dev), MINOR(__entry->dev), +		  __entry->blocks, (void *)__entry->ip) +); + +DECLARE_EVENT_CLASS(ext4__trim, +	TP_PROTO(struct super_block *sb, +		 ext4_group_t group, +		 ext4_grpblk_t start, +		 ext4_grpblk_t len), + +	TP_ARGS(sb, group, start, len), + +	TP_STRUCT__entry( +		__field(	int,	dev_major		) +		__field(	int,	dev_minor		) +		__field(	__u32, 	group			) +		__field(	int,	start			) +		__field(	int,	len			) +	), + +	TP_fast_assign( +		__entry->dev_major	= MAJOR(sb->s_dev); +		__entry->dev_minor	= MINOR(sb->s_dev); +		__entry->group		= group; +		__entry->start		= start; +		__entry->len		= len; +	), + +	TP_printk("dev %d,%d group %u, start %d, len %d", +		  __entry->dev_major, __entry->dev_minor, +		  __entry->group, __entry->start, __entry->len) +); + +DEFINE_EVENT(ext4__trim, ext4_trim_extent, + +	TP_PROTO(struct super_block *sb, +		 ext4_group_t group, +		 ext4_grpblk_t start, +		 ext4_grpblk_t len), + +	TP_ARGS(sb, group, start, len) +); + +DEFINE_EVENT(ext4__trim, ext4_trim_all_free, + +	TP_PROTO(struct super_block *sb, +		 ext4_group_t group, +		 ext4_grpblk_t start, +		 ext4_grpblk_t len), + +	TP_ARGS(sb, group, start, len) +); + +TRACE_EVENT(ext4_ext_handle_unwritten_extents, +	TP_PROTO(struct inode *inode, struct ext4_map_blocks *map, int flags, +		 unsigned int allocated, ext4_fsblk_t newblock), + +	TP_ARGS(inode, map, flags, allocated, newblock), + +	TP_STRUCT__entry( +		__field(	dev_t,		dev		) +		__field(	ino_t,		ino		) +		__field(	int,		flags		) +		__field(	ext4_lblk_t,	lblk		) +		__field(	ext4_fsblk_t,	pblk		) +		__field(	unsigned int,	len		) +		__field(	unsigned int,	allocated	) +		__field(	ext4_fsblk_t,	newblk		) +	), + +	TP_fast_assign( +		__entry->dev		= inode->i_sb->s_dev; +		__entry->ino		= inode->i_ino; +		__entry->flags		= flags; +		__entry->lblk		= map->m_lblk; +		__entry->pblk		= map->m_pblk; +		__entry->len		= map->m_len; +		__entry->allocated	= allocated; +		__entry->newblk		= newblock; +	), + +	TP_printk("dev %d,%d ino 
%lu m_lblk %u m_pblk %llu m_len %u flags %s " +		  "allocated %d newblock %llu", +		  MAJOR(__entry->dev), MINOR(__entry->dev), +		  (unsigned long) __entry->ino, +		  (unsigned) __entry->lblk, (unsigned long long) __entry->pblk, +		  __entry->len, show_map_flags(__entry->flags), +		  (unsigned int) __entry->allocated, +		  (unsigned long long) __entry->newblk) +); + +TRACE_EVENT(ext4_get_implied_cluster_alloc_exit, +	TP_PROTO(struct super_block *sb, struct ext4_map_blocks *map, int ret), + +	TP_ARGS(sb, map, ret), + +	TP_STRUCT__entry( +		__field(	dev_t,		dev	) +		__field(	unsigned int,	flags	) +		__field(	ext4_lblk_t,	lblk	) +		__field(	ext4_fsblk_t,	pblk	) +		__field(	unsigned int,	len	) +		__field(	int,		ret	) +	), + +	TP_fast_assign( +		__entry->dev	= sb->s_dev; +		__entry->flags	= map->m_flags; +		__entry->lblk	= map->m_lblk; +		__entry->pblk	= map->m_pblk; +		__entry->len	= map->m_len; +		__entry->ret	= ret; +	), + +	TP_printk("dev %d,%d m_lblk %u m_pblk %llu m_len %u m_flags %s ret %d", +		  MAJOR(__entry->dev), MINOR(__entry->dev), +		  __entry->lblk, (unsigned long long) __entry->pblk, +		  __entry->len, show_mflags(__entry->flags), __entry->ret) +); + +TRACE_EVENT(ext4_ext_put_in_cache, +	TP_PROTO(struct inode *inode, ext4_lblk_t lblk, unsigned int len, +		 ext4_fsblk_t start), + +	TP_ARGS(inode, lblk, len, start), + +	TP_STRUCT__entry( +		__field(	dev_t,		dev	) +		__field(	ino_t,		ino	) +		__field(	ext4_lblk_t,	lblk	) +		__field(	unsigned int,	len	) +		__field(	ext4_fsblk_t,	start	) +	), + +	TP_fast_assign( +		__entry->dev	= inode->i_sb->s_dev; +		__entry->ino	= inode->i_ino; +		__entry->lblk	= lblk; +		__entry->len	= len; +		__entry->start	= start; +	), + +	TP_printk("dev %d,%d ino %lu lblk %u len %u start %llu", +		  MAJOR(__entry->dev), MINOR(__entry->dev), +		  (unsigned long) __entry->ino, +		  (unsigned) __entry->lblk, +		  __entry->len, +		  (unsigned long long) __entry->start) +); + +TRACE_EVENT(ext4_ext_in_cache, +	TP_PROTO(struct inode *inode, 
ext4_lblk_t lblk, int ret), + +	TP_ARGS(inode, lblk, ret), + +	TP_STRUCT__entry( +		__field(	dev_t,		dev	) +		__field(	ino_t,		ino	) +		__field(	ext4_lblk_t,	lblk	) +		__field(	int,		ret	) +	), + +	TP_fast_assign( +		__entry->dev	= inode->i_sb->s_dev; +		__entry->ino	= inode->i_ino; +		__entry->lblk	= lblk; +		__entry->ret	= ret; +	), + +	TP_printk("dev %d,%d ino %lu lblk %u ret %d", +		  MAJOR(__entry->dev), MINOR(__entry->dev), +		  (unsigned long) __entry->ino, +		  (unsigned) __entry->lblk, +		  __entry->ret) + +); + +TRACE_EVENT(ext4_find_delalloc_range, +	TP_PROTO(struct inode *inode, ext4_lblk_t from, ext4_lblk_t to, +		int reverse, int found, ext4_lblk_t found_blk), + +	TP_ARGS(inode, from, to, reverse, found, found_blk), + +	TP_STRUCT__entry( +		__field(	dev_t,		dev		) +		__field(	ino_t,		ino		) +		__field(	ext4_lblk_t,	from		) +		__field(	ext4_lblk_t,	to		) +		__field(	int,		reverse		) +		__field(	int,		found		) +		__field(	ext4_lblk_t,	found_blk	) +	), + +	TP_fast_assign( +		__entry->dev		= inode->i_sb->s_dev; +		__entry->ino		= inode->i_ino; +		__entry->from		= from; +		__entry->to		= to; +		__entry->reverse	= reverse; +		__entry->found		= found; +		__entry->found_blk	= found_blk; +	), + +	TP_printk("dev %d,%d ino %lu from %u to %u reverse %d found %d " +		  "(blk = %u)", +		  MAJOR(__entry->dev), MINOR(__entry->dev), +		  (unsigned long) __entry->ino, +		  (unsigned) __entry->from, (unsigned) __entry->to, +		  __entry->reverse, __entry->found, +		  (unsigned) __entry->found_blk) +); + +TRACE_EVENT(ext4_get_reserved_cluster_alloc, +	TP_PROTO(struct inode *inode, ext4_lblk_t lblk, unsigned int len), + +	TP_ARGS(inode, lblk, len), + +	TP_STRUCT__entry( +		__field(	dev_t,		dev	) +		__field(	ino_t,		ino	) +		__field(	ext4_lblk_t,	lblk	) +		__field(	unsigned int,	len	) +	), + +	TP_fast_assign( +		__entry->dev	= inode->i_sb->s_dev; +		__entry->ino	= inode->i_ino; +		__entry->lblk	= lblk; +		__entry->len	= len; +	), + +	TP_printk("dev %d,%d ino %lu lblk %u len 
%u", +		  MAJOR(__entry->dev), MINOR(__entry->dev), +		  (unsigned long) __entry->ino, +		  (unsigned) __entry->lblk, +		  __entry->len) +); + +TRACE_EVENT(ext4_ext_show_extent, +	TP_PROTO(struct inode *inode, ext4_lblk_t lblk, ext4_fsblk_t pblk, +		 unsigned short len), + +	TP_ARGS(inode, lblk, pblk, len), + +	TP_STRUCT__entry( +		__field(	dev_t,		dev	) +		__field(	ino_t,		ino	) +		__field(	ext4_fsblk_t,	pblk	) +		__field(	ext4_lblk_t,	lblk	) +		__field(	unsigned short,	len	) +	), + +	TP_fast_assign( +		__entry->dev	= inode->i_sb->s_dev; +		__entry->ino	= inode->i_ino; +		__entry->pblk	= pblk; +		__entry->lblk	= lblk; +		__entry->len	= len; +	), + +	TP_printk("dev %d,%d ino %lu lblk %u pblk %llu len %u", +		  MAJOR(__entry->dev), MINOR(__entry->dev), +		  (unsigned long) __entry->ino, +		  (unsigned) __entry->lblk, +		  (unsigned long long) __entry->pblk, +		  (unsigned short) __entry->len) +); + +TRACE_EVENT(ext4_remove_blocks, +	    TP_PROTO(struct inode *inode, struct ext4_extent *ex, +		ext4_lblk_t from, ext4_fsblk_t to, +		long long partial_cluster), + +	TP_ARGS(inode, ex, from, to, partial_cluster), + +	TP_STRUCT__entry( +		__field(	dev_t,		dev	) +		__field(	ino_t,		ino	) +		__field(	ext4_lblk_t,	from	) +		__field(	ext4_lblk_t,	to	) +		__field(	long long,	partial	) +		__field(	ext4_fsblk_t,	ee_pblk	) +		__field(	ext4_lblk_t,	ee_lblk	) +		__field(	unsigned short,	ee_len	) +	), + +	TP_fast_assign( +		__entry->dev		= inode->i_sb->s_dev; +		__entry->ino		= inode->i_ino; +		__entry->from		= from; +		__entry->to		= to; +		__entry->partial	= partial_cluster; +		__entry->ee_pblk	= ext4_ext_pblock(ex); +		__entry->ee_lblk	= le32_to_cpu(ex->ee_block); +		__entry->ee_len		= ext4_ext_get_actual_len(ex); +	), + +	TP_printk("dev %d,%d ino %lu extent [%u(%llu), %u]" +		  "from %u to %u partial_cluster %lld", +		  MAJOR(__entry->dev), MINOR(__entry->dev), +		  (unsigned long) __entry->ino, +		  (unsigned) __entry->ee_lblk, +		  (unsigned long long) __entry->ee_pblk, +		  
(unsigned short) __entry->ee_len, +		  (unsigned) __entry->from, +		  (unsigned) __entry->to, +		  (long long) __entry->partial) +); + +TRACE_EVENT(ext4_ext_rm_leaf, +	TP_PROTO(struct inode *inode, ext4_lblk_t start, +		 struct ext4_extent *ex, +		 long long partial_cluster), + +	TP_ARGS(inode, start, ex, partial_cluster), + +	TP_STRUCT__entry( +		__field(	dev_t,		dev	) +		__field(	ino_t,		ino	) +		__field(	long long,	partial	) +		__field(	ext4_lblk_t,	start	) +		__field(	ext4_lblk_t,	ee_lblk	) +		__field(	ext4_fsblk_t,	ee_pblk	) +		__field(	short,		ee_len	) +	), + +	TP_fast_assign( +		__entry->dev		= inode->i_sb->s_dev; +		__entry->ino		= inode->i_ino; +		__entry->partial	= partial_cluster; +		__entry->start		= start; +		__entry->ee_lblk	= le32_to_cpu(ex->ee_block); +		__entry->ee_pblk	= ext4_ext_pblock(ex); +		__entry->ee_len		= ext4_ext_get_actual_len(ex); +	), + +	TP_printk("dev %d,%d ino %lu start_lblk %u last_extent [%u(%llu), %u]" +		  "partial_cluster %lld", +		  MAJOR(__entry->dev), MINOR(__entry->dev), +		  (unsigned long) __entry->ino, +		  (unsigned) __entry->start, +		  (unsigned) __entry->ee_lblk, +		  (unsigned long long) __entry->ee_pblk, +		  (unsigned short) __entry->ee_len, +		  (long long) __entry->partial) +); + +TRACE_EVENT(ext4_ext_rm_idx, +	TP_PROTO(struct inode *inode, ext4_fsblk_t pblk), + +	TP_ARGS(inode, pblk), + +	TP_STRUCT__entry( +		__field(	dev_t,		dev	) +		__field(	ino_t,		ino	) +		__field(	ext4_fsblk_t,	pblk	) +	), + +	TP_fast_assign( +		__entry->dev	= inode->i_sb->s_dev; +		__entry->ino	= inode->i_ino; +		__entry->pblk	= pblk; +	), + +	TP_printk("dev %d,%d ino %lu index_pblk %llu", +		  MAJOR(__entry->dev), MINOR(__entry->dev), +		  (unsigned long) __entry->ino, +		  (unsigned long long) __entry->pblk) +); + +TRACE_EVENT(ext4_ext_remove_space, +	TP_PROTO(struct inode *inode, ext4_lblk_t start, +		 ext4_lblk_t end, int depth), + +	TP_ARGS(inode, start, end, depth), + +	TP_STRUCT__entry( +		__field(	dev_t,		dev	) +		__field(	ino_t,		
ino	) +		__field(	ext4_lblk_t,	start	) +		__field(	ext4_lblk_t,	end	) +		__field(	int,		depth	) +	), + +	TP_fast_assign( +		__entry->dev	= inode->i_sb->s_dev; +		__entry->ino	= inode->i_ino; +		__entry->start	= start; +		__entry->end	= end; +		__entry->depth	= depth; +	), + +	TP_printk("dev %d,%d ino %lu since %u end %u depth %d", +		  MAJOR(__entry->dev), MINOR(__entry->dev), +		  (unsigned long) __entry->ino, +		  (unsigned) __entry->start, +		  (unsigned) __entry->end, +		  __entry->depth) +); + +TRACE_EVENT(ext4_ext_remove_space_done, +	TP_PROTO(struct inode *inode, ext4_lblk_t start, ext4_lblk_t end, +		 int depth, long long partial, __le16 eh_entries), + +	TP_ARGS(inode, start, end, depth, partial, eh_entries), + +	TP_STRUCT__entry( +		__field(	dev_t,		dev		) +		__field(	ino_t,		ino		) +		__field(	ext4_lblk_t,	start		) +		__field(	ext4_lblk_t,	end		) +		__field(	int,		depth		) +		__field(	long long,	partial		) +		__field(	unsigned short,	eh_entries	) +	), + +	TP_fast_assign( +		__entry->dev		= inode->i_sb->s_dev; +		__entry->ino		= inode->i_ino; +		__entry->start		= start; +		__entry->end		= end; +		__entry->depth		= depth; +		__entry->partial	= partial; +		__entry->eh_entries	= le16_to_cpu(eh_entries); +	), + +	TP_printk("dev %d,%d ino %lu since %u end %u depth %d partial %lld " +		  "remaining_entries %u", +		  MAJOR(__entry->dev), MINOR(__entry->dev), +		  (unsigned long) __entry->ino, +		  (unsigned) __entry->start, +		  (unsigned) __entry->end, +		  __entry->depth, +		  (long long) __entry->partial, +		  (unsigned short) __entry->eh_entries) +); + +DECLARE_EVENT_CLASS(ext4__es_extent, +	TP_PROTO(struct inode *inode, struct extent_status *es), + +	TP_ARGS(inode, es), + +	TP_STRUCT__entry( +		__field(	dev_t,		dev		) +		__field(	ino_t,		ino		) +		__field(	ext4_lblk_t,	lblk		) +		__field(	ext4_lblk_t,	len		) +		__field(	ext4_fsblk_t,	pblk		) +		__field(	char, status	) +	), + +	TP_fast_assign( +		__entry->dev	= inode->i_sb->s_dev; +		__entry->ino	= 
inode->i_ino; +		__entry->lblk	= es->es_lblk; +		__entry->len	= es->es_len; +		__entry->pblk	= ext4_es_pblock(es); +		__entry->status	= ext4_es_status(es); +	), + +	TP_printk("dev %d,%d ino %lu es [%u/%u) mapped %llu status %s", +		  MAJOR(__entry->dev), MINOR(__entry->dev), +		  (unsigned long) __entry->ino, +		  __entry->lblk, __entry->len, +		  __entry->pblk, show_extent_status(__entry->status)) +); + +DEFINE_EVENT(ext4__es_extent, ext4_es_insert_extent, +	TP_PROTO(struct inode *inode, struct extent_status *es), + +	TP_ARGS(inode, es) +); + +DEFINE_EVENT(ext4__es_extent, ext4_es_cache_extent, +	TP_PROTO(struct inode *inode, struct extent_status *es), + +	TP_ARGS(inode, es) +); + +TRACE_EVENT(ext4_es_remove_extent, +	TP_PROTO(struct inode *inode, ext4_lblk_t lblk, ext4_lblk_t len), + +	TP_ARGS(inode, lblk, len), + +	TP_STRUCT__entry( +		__field(	dev_t,	dev			) +		__field(	ino_t,	ino			) +		__field(	loff_t,	lblk			) +		__field(	loff_t,	len			) +	), + +	TP_fast_assign( +		__entry->dev	= inode->i_sb->s_dev; +		__entry->ino	= inode->i_ino; +		__entry->lblk	= lblk; +		__entry->len	= len; +	), + +	TP_printk("dev %d,%d ino %lu es [%lld/%lld)", +		  MAJOR(__entry->dev), MINOR(__entry->dev), +		  (unsigned long) __entry->ino, +		  __entry->lblk, __entry->len) +); + +TRACE_EVENT(ext4_es_find_delayed_extent_range_enter, +	TP_PROTO(struct inode *inode, ext4_lblk_t lblk), + +	TP_ARGS(inode, lblk), + +	TP_STRUCT__entry( +		__field(	dev_t,		dev		) +		__field(	ino_t,		ino		) +		__field(	ext4_lblk_t,	lblk		) +	), + +	TP_fast_assign( +		__entry->dev	= inode->i_sb->s_dev; +		__entry->ino	= inode->i_ino; +		__entry->lblk	= lblk; +	), + +	TP_printk("dev %d,%d ino %lu lblk %u", +		  MAJOR(__entry->dev), MINOR(__entry->dev), +		  (unsigned long) __entry->ino, __entry->lblk) +); + +TRACE_EVENT(ext4_es_find_delayed_extent_range_exit, +	TP_PROTO(struct inode *inode, struct extent_status *es), + +	TP_ARGS(inode, es), + +	TP_STRUCT__entry( +		__field(	dev_t,		dev		) +		__field(	ino_t,		ino		
) +		__field(	ext4_lblk_t,	lblk		) +		__field(	ext4_lblk_t,	len		) +		__field(	ext4_fsblk_t,	pblk		) +		__field(	char, status	) +	), + +	TP_fast_assign( +		__entry->dev	= inode->i_sb->s_dev; +		__entry->ino	= inode->i_ino; +		__entry->lblk	= es->es_lblk; +		__entry->len	= es->es_len; +		__entry->pblk	= ext4_es_pblock(es); +		__entry->status	= ext4_es_status(es); +	), + +	TP_printk("dev %d,%d ino %lu es [%u/%u) mapped %llu status %s", +		  MAJOR(__entry->dev), MINOR(__entry->dev), +		  (unsigned long) __entry->ino, +		  __entry->lblk, __entry->len, +		  __entry->pblk, show_extent_status(__entry->status)) +); + +TRACE_EVENT(ext4_es_lookup_extent_enter, +	TP_PROTO(struct inode *inode, ext4_lblk_t lblk), + +	TP_ARGS(inode, lblk), + +	TP_STRUCT__entry( +		__field(	dev_t,		dev		) +		__field(	ino_t,		ino		) +		__field(	ext4_lblk_t,	lblk		) +	), + +	TP_fast_assign( +		__entry->dev	= inode->i_sb->s_dev; +		__entry->ino	= inode->i_ino; +		__entry->lblk	= lblk; +	), + +	TP_printk("dev %d,%d ino %lu lblk %u", +		  MAJOR(__entry->dev), MINOR(__entry->dev), +		  (unsigned long) __entry->ino, __entry->lblk) +); + +TRACE_EVENT(ext4_es_lookup_extent_exit, +	TP_PROTO(struct inode *inode, struct extent_status *es, +		 int found), + +	TP_ARGS(inode, es, found), + +	TP_STRUCT__entry( +		__field(	dev_t,		dev		) +		__field(	ino_t,		ino		) +		__field(	ext4_lblk_t,	lblk		) +		__field(	ext4_lblk_t,	len		) +		__field(	ext4_fsblk_t,	pblk		) +		__field(	char,		status		) +		__field(	int,		found		) +	), + +	TP_fast_assign( +		__entry->dev	= inode->i_sb->s_dev; +		__entry->ino	= inode->i_ino; +		__entry->lblk	= es->es_lblk; +		__entry->len	= es->es_len; +		__entry->pblk	= ext4_es_pblock(es); +		__entry->status	= ext4_es_status(es); +		__entry->found	= found; +	), + +	TP_printk("dev %d,%d ino %lu found %d [%u/%u) %llu %s", +		  MAJOR(__entry->dev), MINOR(__entry->dev), +		  (unsigned long) __entry->ino, __entry->found, +		  __entry->lblk, __entry->len, +		  __entry->found ? 
__entry->pblk : 0, +		  show_extent_status(__entry->found ? __entry->status : 0)) +); + +TRACE_EVENT(ext4_es_shrink_enter, +	TP_PROTO(struct super_block *sb, int nr_to_scan, int cache_cnt), + +	TP_ARGS(sb, nr_to_scan, cache_cnt), + +	TP_STRUCT__entry( +		__field(	dev_t,	dev			) +		__field(	int,	nr_to_scan		) +		__field(	int,	cache_cnt		) +	), + +	TP_fast_assign( +		__entry->dev		= sb->s_dev; +		__entry->nr_to_scan	= nr_to_scan; +		__entry->cache_cnt	= cache_cnt; +	), + +	TP_printk("dev %d,%d nr_to_scan %d cache_cnt %d", +		  MAJOR(__entry->dev), MINOR(__entry->dev), +		  __entry->nr_to_scan, __entry->cache_cnt) +); + +TRACE_EVENT(ext4_es_shrink_exit, +	TP_PROTO(struct super_block *sb, int shrunk_nr, int cache_cnt), + +	TP_ARGS(sb, shrunk_nr, cache_cnt), + +	TP_STRUCT__entry( +		__field(	dev_t,	dev			) +		__field(	int,	shrunk_nr		) +		__field(	int,	cache_cnt		) +	), + +	TP_fast_assign( +		__entry->dev		= sb->s_dev; +		__entry->shrunk_nr	= shrunk_nr; +		__entry->cache_cnt	= cache_cnt; +	), + +	TP_printk("dev %d,%d shrunk_nr %d cache_cnt %d", +		  MAJOR(__entry->dev), MINOR(__entry->dev), +		  __entry->shrunk_nr, __entry->cache_cnt) +); + +TRACE_EVENT(ext4_collapse_range, +	TP_PROTO(struct inode *inode, loff_t offset, loff_t len), + +	TP_ARGS(inode, offset, len), + +	TP_STRUCT__entry( +		__field(dev_t,	dev) +		__field(ino_t,	ino) +		__field(loff_t,	offset) +		__field(loff_t, len) +	), + +	TP_fast_assign( +		__entry->dev	= inode->i_sb->s_dev; +		__entry->ino	= inode->i_ino; +		__entry->offset	= offset; +		__entry->len	= len; +	), + +	TP_printk("dev %d,%d ino %lu offset %lld len %lld", +		  MAJOR(__entry->dev), MINOR(__entry->dev), +		  (unsigned long) __entry->ino, +		  __entry->offset, __entry->len) +); +  #endif /* _TRACE_EXT4_H */  /* This part must be outside protection */ diff --git a/include/trace/events/f2fs.h b/include/trace/events/f2fs.h new file mode 100644 index 00000000000..b983990b4a9 --- /dev/null +++ b/include/trace/events/f2fs.h @@ -0,0 +1,932 @@ 
+#undef TRACE_SYSTEM +#define TRACE_SYSTEM f2fs + +#if !defined(_TRACE_F2FS_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_F2FS_H + +#include <linux/tracepoint.h> + +#define show_dev(entry)		MAJOR(entry->dev), MINOR(entry->dev) +#define show_dev_ino(entry)	show_dev(entry), (unsigned long)entry->ino + +#define show_block_type(type)						\ +	__print_symbolic(type,						\ +		{ NODE,		"NODE" },				\ +		{ DATA,		"DATA" },				\ +		{ META,		"META" },				\ +		{ META_FLUSH,	"META_FLUSH" }) + +#define F2FS_BIO_MASK(t)	(t & (READA | WRITE_FLUSH_FUA)) +#define F2FS_BIO_EXTRA_MASK(t)	(t & (REQ_META | REQ_PRIO)) + +#define show_bio_type(type)	show_bio_base(type), show_bio_extra(type) + +#define show_bio_base(type)						\ +	__print_symbolic(F2FS_BIO_MASK(type),				\ +		{ READ, 		"READ" },			\ +		{ READA, 		"READAHEAD" },			\ +		{ READ_SYNC, 		"READ_SYNC" },			\ +		{ WRITE, 		"WRITE" },			\ +		{ WRITE_SYNC, 		"WRITE_SYNC" },			\ +		{ WRITE_FLUSH,		"WRITE_FLUSH" },		\ +		{ WRITE_FUA, 		"WRITE_FUA" },			\ +		{ WRITE_FLUSH_FUA,	"WRITE_FLUSH_FUA" }) + +#define show_bio_extra(type)						\ +	__print_symbolic(F2FS_BIO_EXTRA_MASK(type),			\ +		{ REQ_META, 		"(M)" },			\ +		{ REQ_PRIO, 		"(P)" },			\ +		{ REQ_META | REQ_PRIO,	"(MP)" },			\ +		{ 0, " \b" }) + +#define show_data_type(type)						\ +	__print_symbolic(type,						\ +		{ CURSEG_HOT_DATA, 	"Hot DATA" },			\ +		{ CURSEG_WARM_DATA, 	"Warm DATA" },			\ +		{ CURSEG_COLD_DATA, 	"Cold DATA" },			\ +		{ CURSEG_HOT_NODE, 	"Hot NODE" },			\ +		{ CURSEG_WARM_NODE, 	"Warm NODE" },			\ +		{ CURSEG_COLD_NODE, 	"Cold NODE" },			\ +		{ NO_CHECK_TYPE, 	"No TYPE" }) + +#define show_file_type(type)						\ +	__print_symbolic(type,						\ +		{ 0,		"FILE" },				\ +		{ 1,		"DIR" }) + +#define show_gc_type(type)						\ +	__print_symbolic(type,						\ +		{ FG_GC,	"Foreground GC" },			\ +		{ BG_GC,	"Background GC" }) + +#define show_alloc_mode(type)						\ +	__print_symbolic(type,						\ +		{ LFS,	"LFS-mode" },					\ +		{ SSR,	"SSR-mode" }) + 
+#define show_victim_policy(type)					\ +	__print_symbolic(type,						\ +		{ GC_GREEDY,	"Greedy" },				\ +		{ GC_CB,	"Cost-Benefit" }) + +struct victim_sel_policy; + +DECLARE_EVENT_CLASS(f2fs__inode, + +	TP_PROTO(struct inode *inode), + +	TP_ARGS(inode), + +	TP_STRUCT__entry( +		__field(dev_t,	dev) +		__field(ino_t,	ino) +		__field(ino_t,	pino) +		__field(umode_t, mode) +		__field(loff_t,	size) +		__field(unsigned int, nlink) +		__field(blkcnt_t, blocks) +		__field(__u8,	advise) +	), + +	TP_fast_assign( +		__entry->dev	= inode->i_sb->s_dev; +		__entry->ino	= inode->i_ino; +		__entry->pino	= F2FS_I(inode)->i_pino; +		__entry->mode	= inode->i_mode; +		__entry->nlink	= inode->i_nlink; +		__entry->size	= inode->i_size; +		__entry->blocks	= inode->i_blocks; +		__entry->advise	= F2FS_I(inode)->i_advise; +	), + +	TP_printk("dev = (%d,%d), ino = %lu, pino = %lu, i_mode = 0x%hx, " +		"i_size = %lld, i_nlink = %u, i_blocks = %llu, i_advise = 0x%x", +		show_dev_ino(__entry), +		(unsigned long)__entry->pino, +		__entry->mode, +		__entry->size, +		(unsigned int)__entry->nlink, +		(unsigned long long)__entry->blocks, +		(unsigned char)__entry->advise) +); + +DECLARE_EVENT_CLASS(f2fs__inode_exit, + +	TP_PROTO(struct inode *inode, int ret), + +	TP_ARGS(inode, ret), + +	TP_STRUCT__entry( +		__field(dev_t,	dev) +		__field(ino_t,	ino) +		__field(int,	ret) +	), + +	TP_fast_assign( +		__entry->dev	= inode->i_sb->s_dev; +		__entry->ino	= inode->i_ino; +		__entry->ret	= ret; +	), + +	TP_printk("dev = (%d,%d), ino = %lu, ret = %d", +		show_dev_ino(__entry), +		__entry->ret) +); + +DEFINE_EVENT(f2fs__inode, f2fs_sync_file_enter, + +	TP_PROTO(struct inode *inode), + +	TP_ARGS(inode) +); + +TRACE_EVENT(f2fs_sync_file_exit, + +	TP_PROTO(struct inode *inode, bool need_cp, int datasync, int ret), + +	TP_ARGS(inode, need_cp, datasync, ret), + +	TP_STRUCT__entry( +		__field(dev_t,	dev) +		__field(ino_t,	ino) +		__field(bool,	need_cp) +		__field(int,	datasync) +		__field(int,	ret) +	), + +	
TP_fast_assign( +		__entry->dev		= inode->i_sb->s_dev; +		__entry->ino		= inode->i_ino; +		__entry->need_cp	= need_cp; +		__entry->datasync	= datasync; +		__entry->ret		= ret; +	), + +	TP_printk("dev = (%d,%d), ino = %lu, checkpoint is %s, " +		"datasync = %d, ret = %d", +		show_dev_ino(__entry), +		__entry->need_cp ? "needed" : "not needed", +		__entry->datasync, +		__entry->ret) +); + +TRACE_EVENT(f2fs_sync_fs, + +	TP_PROTO(struct super_block *sb, int wait), + +	TP_ARGS(sb, wait), + +	TP_STRUCT__entry( +		__field(dev_t,	dev) +		__field(int,	dirty) +		__field(int,	wait) +	), + +	TP_fast_assign( +		__entry->dev	= sb->s_dev; +		__entry->dirty	= F2FS_SB(sb)->s_dirty; +		__entry->wait	= wait; +	), + +	TP_printk("dev = (%d,%d), superblock is %s, wait = %d", +		show_dev(__entry), +		__entry->dirty ? "dirty" : "not dirty", +		__entry->wait) +); + +DEFINE_EVENT(f2fs__inode, f2fs_iget, + +	TP_PROTO(struct inode *inode), + +	TP_ARGS(inode) +); + +DEFINE_EVENT(f2fs__inode_exit, f2fs_iget_exit, + +	TP_PROTO(struct inode *inode, int ret), + +	TP_ARGS(inode, ret) +); + +DEFINE_EVENT(f2fs__inode, f2fs_evict_inode, + +	TP_PROTO(struct inode *inode), + +	TP_ARGS(inode) +); + +DEFINE_EVENT(f2fs__inode_exit, f2fs_new_inode, + +	TP_PROTO(struct inode *inode, int ret), + +	TP_ARGS(inode, ret) +); + +TRACE_EVENT(f2fs_unlink_enter, + +	TP_PROTO(struct inode *dir, struct dentry *dentry), + +	TP_ARGS(dir, dentry), + +	TP_STRUCT__entry( +		__field(dev_t,	dev) +		__field(ino_t,	ino) +		__field(loff_t,	size) +		__field(blkcnt_t, blocks) +		__field(const char *,	name) +	), + +	TP_fast_assign( +		__entry->dev	= dir->i_sb->s_dev; +		__entry->ino	= dir->i_ino; +		__entry->size	= dir->i_size; +		__entry->blocks	= dir->i_blocks; +		__entry->name	= dentry->d_name.name; +	), + +	TP_printk("dev = (%d,%d), dir ino = %lu, i_size = %lld, " +		"i_blocks = %llu, name = %s", +		show_dev_ino(__entry), +		__entry->size, +		(unsigned long long)__entry->blocks, +		__entry->name) +); + 
+DEFINE_EVENT(f2fs__inode_exit, f2fs_unlink_exit, + +	TP_PROTO(struct inode *inode, int ret), + +	TP_ARGS(inode, ret) +); + +DEFINE_EVENT(f2fs__inode, f2fs_truncate, + +	TP_PROTO(struct inode *inode), + +	TP_ARGS(inode) +); + +TRACE_EVENT(f2fs_truncate_data_blocks_range, + +	TP_PROTO(struct inode *inode, nid_t nid, unsigned int ofs, int free), + +	TP_ARGS(inode, nid,  ofs, free), + +	TP_STRUCT__entry( +		__field(dev_t,	dev) +		__field(ino_t,	ino) +		__field(nid_t,	nid) +		__field(unsigned int,	ofs) +		__field(int,	free) +	), + +	TP_fast_assign( +		__entry->dev	= inode->i_sb->s_dev; +		__entry->ino	= inode->i_ino; +		__entry->nid	= nid; +		__entry->ofs	= ofs; +		__entry->free	= free; +	), + +	TP_printk("dev = (%d,%d), ino = %lu, nid = %u, offset = %u, freed = %d", +		show_dev_ino(__entry), +		(unsigned int)__entry->nid, +		__entry->ofs, +		__entry->free) +); + +DECLARE_EVENT_CLASS(f2fs__truncate_op, + +	TP_PROTO(struct inode *inode, u64 from), + +	TP_ARGS(inode, from), + +	TP_STRUCT__entry( +		__field(dev_t,	dev) +		__field(ino_t,	ino) +		__field(loff_t,	size) +		__field(blkcnt_t, blocks) +		__field(u64,	from) +	), + +	TP_fast_assign( +		__entry->dev	= inode->i_sb->s_dev; +		__entry->ino	= inode->i_ino; +		__entry->size	= inode->i_size; +		__entry->blocks	= inode->i_blocks; +		__entry->from	= from; +	), + +	TP_printk("dev = (%d,%d), ino = %lu, i_size = %lld, i_blocks = %llu, " +		"start file offset = %llu", +		show_dev_ino(__entry), +		__entry->size, +		(unsigned long long)__entry->blocks, +		(unsigned long long)__entry->from) +); + +DEFINE_EVENT(f2fs__truncate_op, f2fs_truncate_blocks_enter, + +	TP_PROTO(struct inode *inode, u64 from), + +	TP_ARGS(inode, from) +); + +DEFINE_EVENT(f2fs__inode_exit, f2fs_truncate_blocks_exit, + +	TP_PROTO(struct inode *inode, int ret), + +	TP_ARGS(inode, ret) +); + +DEFINE_EVENT(f2fs__truncate_op, f2fs_truncate_inode_blocks_enter, + +	TP_PROTO(struct inode *inode, u64 from), + +	TP_ARGS(inode, from) +); + 
+DEFINE_EVENT(f2fs__inode_exit, f2fs_truncate_inode_blocks_exit, + +	TP_PROTO(struct inode *inode, int ret), + +	TP_ARGS(inode, ret) +); + +DECLARE_EVENT_CLASS(f2fs__truncate_node, + +	TP_PROTO(struct inode *inode, nid_t nid, block_t blk_addr), + +	TP_ARGS(inode, nid, blk_addr), + +	TP_STRUCT__entry( +		__field(dev_t,	dev) +		__field(ino_t,	ino) +		__field(nid_t,	nid) +		__field(block_t,	blk_addr) +	), + +	TP_fast_assign( +		__entry->dev		= inode->i_sb->s_dev; +		__entry->ino		= inode->i_ino; +		__entry->nid		= nid; +		__entry->blk_addr	= blk_addr; +	), + +	TP_printk("dev = (%d,%d), ino = %lu, nid = %u, block_address = 0x%llx", +		show_dev_ino(__entry), +		(unsigned int)__entry->nid, +		(unsigned long long)__entry->blk_addr) +); + +DEFINE_EVENT(f2fs__truncate_node, f2fs_truncate_nodes_enter, + +	TP_PROTO(struct inode *inode, nid_t nid, block_t blk_addr), + +	TP_ARGS(inode, nid, blk_addr) +); + +DEFINE_EVENT(f2fs__inode_exit, f2fs_truncate_nodes_exit, + +	TP_PROTO(struct inode *inode, int ret), + +	TP_ARGS(inode, ret) +); + +DEFINE_EVENT(f2fs__truncate_node, f2fs_truncate_node, + +	TP_PROTO(struct inode *inode, nid_t nid, block_t blk_addr), + +	TP_ARGS(inode, nid, blk_addr) +); + +TRACE_EVENT(f2fs_truncate_partial_nodes, + +	TP_PROTO(struct inode *inode, nid_t nid[], int depth, int err), + +	TP_ARGS(inode, nid, depth, err), + +	TP_STRUCT__entry( +		__field(dev_t,	dev) +		__field(ino_t,	ino) +		__field(nid_t,	nid[3]) +		__field(int,	depth) +		__field(int,	err) +	), + +	TP_fast_assign( +		__entry->dev	= inode->i_sb->s_dev; +		__entry->ino	= inode->i_ino; +		__entry->nid[0]	= nid[0]; +		__entry->nid[1]	= nid[1]; +		__entry->nid[2]	= nid[2]; +		__entry->depth	= depth; +		__entry->err	= err; +	), + +	TP_printk("dev = (%d,%d), ino = %lu, " +		"nid[0] = %u, nid[1] = %u, nid[2] = %u, depth = %d, err = %d", +		show_dev_ino(__entry), +		(unsigned int)__entry->nid[0], +		(unsigned int)__entry->nid[1], +		(unsigned int)__entry->nid[2], +		__entry->depth, +		__entry->err) +); + 
+TRACE_EVENT_CONDITION(f2fs_submit_page_bio, + +	TP_PROTO(struct page *page, sector_t blkaddr, int type), + +	TP_ARGS(page, blkaddr, type), + +	TP_CONDITION(page->mapping), + +	TP_STRUCT__entry( +		__field(dev_t,	dev) +		__field(ino_t,	ino) +		__field(pgoff_t,	index) +		__field(sector_t,	blkaddr) +		__field(int,	type) +	), + +	TP_fast_assign( +		__entry->dev		= page->mapping->host->i_sb->s_dev; +		__entry->ino		= page->mapping->host->i_ino; +		__entry->index		= page->index; +		__entry->blkaddr	= blkaddr; +		__entry->type		= type; +	), + +	TP_printk("dev = (%d,%d), ino = %lu, page_index = 0x%lx, " +		"blkaddr = 0x%llx, bio_type = %s%s", +		show_dev_ino(__entry), +		(unsigned long)__entry->index, +		(unsigned long long)__entry->blkaddr, +		show_bio_type(__entry->type)) +); + +TRACE_EVENT(f2fs_get_data_block, +	TP_PROTO(struct inode *inode, sector_t iblock, +				struct buffer_head *bh, int ret), + +	TP_ARGS(inode, iblock, bh, ret), + +	TP_STRUCT__entry( +		__field(dev_t,	dev) +		__field(ino_t,	ino) +		__field(sector_t,	iblock) +		__field(sector_t,	bh_start) +		__field(size_t,	bh_size) +		__field(int,	ret) +	), + +	TP_fast_assign( +		__entry->dev		= inode->i_sb->s_dev; +		__entry->ino		= inode->i_ino; +		__entry->iblock		= iblock; +		__entry->bh_start	= bh->b_blocknr; +		__entry->bh_size	= bh->b_size; +		__entry->ret		= ret; +	), + +	TP_printk("dev = (%d,%d), ino = %lu, file offset = %llu, " +		"start blkaddr = 0x%llx, len = 0x%llx bytes, err = %d", +		show_dev_ino(__entry), +		(unsigned long long)__entry->iblock, +		(unsigned long long)__entry->bh_start, +		(unsigned long long)__entry->bh_size, +		__entry->ret) +); + +TRACE_EVENT(f2fs_get_victim, + +	TP_PROTO(struct super_block *sb, int type, int gc_type, +			struct victim_sel_policy *p, unsigned int pre_victim, +			unsigned int prefree, unsigned int free), + +	TP_ARGS(sb, type, gc_type, p, pre_victim, prefree, free), + +	TP_STRUCT__entry( +		__field(dev_t,	dev) +		__field(int,	type) +		__field(int,	gc_type) +		
__field(int,	alloc_mode) +		__field(int,	gc_mode) +		__field(unsigned int,	victim) +		__field(unsigned int,	ofs_unit) +		__field(unsigned int,	pre_victim) +		__field(unsigned int,	prefree) +		__field(unsigned int,	free) +	), + +	TP_fast_assign( +		__entry->dev		= sb->s_dev; +		__entry->type		= type; +		__entry->gc_type	= gc_type; +		__entry->alloc_mode	= p->alloc_mode; +		__entry->gc_mode	= p->gc_mode; +		__entry->victim		= p->min_segno; +		__entry->ofs_unit	= p->ofs_unit; +		__entry->pre_victim	= pre_victim; +		__entry->prefree	= prefree; +		__entry->free		= free; +	), + +	TP_printk("dev = (%d,%d), type = %s, policy = (%s, %s, %s), victim = %u " +		"ofs_unit = %u, pre_victim_secno = %d, prefree = %u, free = %u", +		show_dev(__entry), +		show_data_type(__entry->type), +		show_gc_type(__entry->gc_type), +		show_alloc_mode(__entry->alloc_mode), +		show_victim_policy(__entry->gc_mode), +		__entry->victim, +		__entry->ofs_unit, +		(int)__entry->pre_victim, +		__entry->prefree, +		__entry->free) +); + +TRACE_EVENT(f2fs_fallocate, + +	TP_PROTO(struct inode *inode, int mode, +				loff_t offset, loff_t len, int ret), + +	TP_ARGS(inode, mode, offset, len, ret), + +	TP_STRUCT__entry( +		__field(dev_t,	dev) +		__field(ino_t,	ino) +		__field(int,	mode) +		__field(loff_t,	offset) +		__field(loff_t,	len) +		__field(loff_t, size) +		__field(blkcnt_t, blocks) +		__field(int,	ret) +	), + +	TP_fast_assign( +		__entry->dev	= inode->i_sb->s_dev; +		__entry->ino	= inode->i_ino; +		__entry->mode	= mode; +		__entry->offset	= offset; +		__entry->len	= len; +		__entry->size	= inode->i_size; +		__entry->blocks = inode->i_blocks; +		__entry->ret	= ret; +	), + +	TP_printk("dev = (%d,%d), ino = %lu, mode = %x, offset = %lld, " +		"len = %lld,  i_size = %lld, i_blocks = %llu, ret = %d", +		show_dev_ino(__entry), +		__entry->mode, +		(unsigned long long)__entry->offset, +		(unsigned long long)__entry->len, +		(unsigned long long)__entry->size, +		(unsigned long long)__entry->blocks, +		
__entry->ret) +); + +TRACE_EVENT(f2fs_reserve_new_block, + +	TP_PROTO(struct inode *inode, nid_t nid, unsigned int ofs_in_node), + +	TP_ARGS(inode, nid, ofs_in_node), + +	TP_STRUCT__entry( +		__field(dev_t,	dev) +		__field(nid_t, nid) +		__field(unsigned int, ofs_in_node) +	), + +	TP_fast_assign( +		__entry->dev	= inode->i_sb->s_dev; +		__entry->nid	= nid; +		__entry->ofs_in_node = ofs_in_node; +	), + +	TP_printk("dev = (%d,%d), nid = %u, ofs_in_node = %u", +		show_dev(__entry), +		(unsigned int)__entry->nid, +		__entry->ofs_in_node) +); + +DECLARE_EVENT_CLASS(f2fs__submit_bio, + +	TP_PROTO(struct super_block *sb, int rw, int type, struct bio *bio), + +	TP_ARGS(sb, rw, type, bio), + +	TP_STRUCT__entry( +		__field(dev_t,	dev) +		__field(int,	rw) +		__field(int,	type) +		__field(sector_t,	sector) +		__field(unsigned int,	size) +	), + +	TP_fast_assign( +		__entry->dev		= sb->s_dev; +		__entry->rw		= rw; +		__entry->type		= type; +		__entry->sector		= bio->bi_iter.bi_sector; +		__entry->size		= bio->bi_iter.bi_size; +	), + +	TP_printk("dev = (%d,%d), %s%s, %s, sector = %lld, size = %u", +		show_dev(__entry), +		show_bio_type(__entry->rw), +		show_block_type(__entry->type), +		(unsigned long long)__entry->sector, +		__entry->size) +); + +DEFINE_EVENT_CONDITION(f2fs__submit_bio, f2fs_submit_write_bio, + +	TP_PROTO(struct super_block *sb, int rw, int type, struct bio *bio), + +	TP_ARGS(sb, rw, type, bio), + +	TP_CONDITION(bio) +); + +DEFINE_EVENT_CONDITION(f2fs__submit_bio, f2fs_submit_read_bio, + +	TP_PROTO(struct super_block *sb, int rw, int type, struct bio *bio), + +	TP_ARGS(sb, rw, type, bio), + +	TP_CONDITION(bio) +); + +TRACE_EVENT(f2fs_write_begin, + +	TP_PROTO(struct inode *inode, loff_t pos, unsigned int len, +				unsigned int flags), + +	TP_ARGS(inode, pos, len, flags), + +	TP_STRUCT__entry( +		__field(dev_t,	dev) +		__field(ino_t,	ino) +		__field(loff_t,	pos) +		__field(unsigned int, len) +		__field(unsigned int, flags) +	), + +	TP_fast_assign( +		__entry->dev	
= inode->i_sb->s_dev; +		__entry->ino	= inode->i_ino; +		__entry->pos	= pos; +		__entry->len	= len; +		__entry->flags	= flags; +	), + +	TP_printk("dev = (%d,%d), ino = %lu, pos = %llu, len = %u, flags = %u", +		show_dev_ino(__entry), +		(unsigned long long)__entry->pos, +		__entry->len, +		__entry->flags) +); + +TRACE_EVENT(f2fs_write_end, + +	TP_PROTO(struct inode *inode, loff_t pos, unsigned int len, +				unsigned int copied), + +	TP_ARGS(inode, pos, len, copied), + +	TP_STRUCT__entry( +		__field(dev_t,	dev) +		__field(ino_t,	ino) +		__field(loff_t,	pos) +		__field(unsigned int, len) +		__field(unsigned int, copied) +	), + +	TP_fast_assign( +		__entry->dev	= inode->i_sb->s_dev; +		__entry->ino	= inode->i_ino; +		__entry->pos	= pos; +		__entry->len	= len; +		__entry->copied	= copied; +	), + +	TP_printk("dev = (%d,%d), ino = %lu, pos = %llu, len = %u, copied = %u", +		show_dev_ino(__entry), +		(unsigned long long)__entry->pos, +		__entry->len, +		__entry->copied) +); + +DECLARE_EVENT_CLASS(f2fs__page, + +	TP_PROTO(struct page *page, int type), + +	TP_ARGS(page, type), + +	TP_STRUCT__entry( +		__field(dev_t,	dev) +		__field(ino_t,	ino) +		__field(int, type) +		__field(int, dir) +		__field(pgoff_t, index) +		__field(int, dirty) +		__field(int, uptodate) +	), + +	TP_fast_assign( +		__entry->dev	= page->mapping->host->i_sb->s_dev; +		__entry->ino	= page->mapping->host->i_ino; +		__entry->type	= type; +		__entry->dir	= S_ISDIR(page->mapping->host->i_mode); +		__entry->index	= page->index; +		__entry->dirty	= PageDirty(page); +		__entry->uptodate = PageUptodate(page); +	), + +	TP_printk("dev = (%d,%d), ino = %lu, %s, %s, index = %lu, " +		"dirty = %d, uptodate = %d", +		show_dev_ino(__entry), +		show_block_type(__entry->type), +		show_file_type(__entry->dir), +		(unsigned long)__entry->index, +		__entry->dirty, +		__entry->uptodate) +); + +DEFINE_EVENT(f2fs__page, f2fs_writepage, + +	TP_PROTO(struct page *page, int type), + +	TP_ARGS(page, type) +); + 
+DEFINE_EVENT(f2fs__page, f2fs_readpage, + +	TP_PROTO(struct page *page, int type), + +	TP_ARGS(page, type) +); + +DEFINE_EVENT(f2fs__page, f2fs_set_page_dirty, + +	TP_PROTO(struct page *page, int type), + +	TP_ARGS(page, type) +); + +DEFINE_EVENT(f2fs__page, f2fs_vm_page_mkwrite, + +	TP_PROTO(struct page *page, int type), + +	TP_ARGS(page, type) +); + +TRACE_EVENT(f2fs_writepages, + +	TP_PROTO(struct inode *inode, struct writeback_control *wbc, int type), + +	TP_ARGS(inode, wbc, type), + +	TP_STRUCT__entry( +		__field(dev_t,	dev) +		__field(ino_t,	ino) +		__field(int,	type) +		__field(int,	dir) +		__field(long,	nr_to_write) +		__field(long,	pages_skipped) +		__field(loff_t,	range_start) +		__field(loff_t,	range_end) +		__field(pgoff_t, writeback_index) +		__field(int,	sync_mode) +		__field(char,	for_kupdate) +		__field(char,	for_background) +		__field(char,	tagged_writepages) +		__field(char,	for_reclaim) +		__field(char,	range_cyclic) +		__field(char,	for_sync) +	), + +	TP_fast_assign( +		__entry->dev		= inode->i_sb->s_dev; +		__entry->ino		= inode->i_ino; +		__entry->type		= type; +		__entry->dir		= S_ISDIR(inode->i_mode); +		__entry->nr_to_write	= wbc->nr_to_write; +		__entry->pages_skipped	= wbc->pages_skipped; +		__entry->range_start	= wbc->range_start; +		__entry->range_end	= wbc->range_end; +		__entry->writeback_index = inode->i_mapping->writeback_index; +		__entry->sync_mode	= wbc->sync_mode; +		__entry->for_kupdate	= wbc->for_kupdate; +		__entry->for_background	= wbc->for_background; +		__entry->tagged_writepages	= wbc->tagged_writepages; +		__entry->for_reclaim	= wbc->for_reclaim; +		__entry->range_cyclic	= wbc->range_cyclic; +		__entry->for_sync	= wbc->for_sync; +	), + +	TP_printk("dev = (%d,%d), ino = %lu, %s, %s, nr_to_write %ld, " +		"skipped %ld, start %lld, end %lld, wb_idx %lu, sync_mode %d, " +		"kupdate %u background %u tagged %u reclaim %u cyclic %u sync %u", +		show_dev_ino(__entry), +		show_block_type(__entry->type), +		
show_file_type(__entry->dir), +		__entry->nr_to_write, +		__entry->pages_skipped, +		__entry->range_start, +		__entry->range_end, +		(unsigned long)__entry->writeback_index, +		__entry->sync_mode, +		__entry->for_kupdate, +		__entry->for_background, +		__entry->tagged_writepages, +		__entry->for_reclaim, +		__entry->range_cyclic, +		__entry->for_sync) +); + +TRACE_EVENT(f2fs_submit_page_mbio, + +	TP_PROTO(struct page *page, int rw, int type, block_t blk_addr), + +	TP_ARGS(page, rw, type, blk_addr), + +	TP_STRUCT__entry( +		__field(dev_t,	dev) +		__field(ino_t,	ino) +		__field(int, rw) +		__field(int, type) +		__field(pgoff_t, index) +		__field(block_t, block) +	), + +	TP_fast_assign( +		__entry->dev	= page->mapping->host->i_sb->s_dev; +		__entry->ino	= page->mapping->host->i_ino; +		__entry->rw	= rw; +		__entry->type	= type; +		__entry->index	= page->index; +		__entry->block	= blk_addr; +	), + +	TP_printk("dev = (%d,%d), ino = %lu, %s%s, %s, index = %lu, blkaddr = 0x%llx", +		show_dev_ino(__entry), +		show_bio_type(__entry->rw), +		show_block_type(__entry->type), +		(unsigned long)__entry->index, +		(unsigned long long)__entry->block) +); + +TRACE_EVENT(f2fs_write_checkpoint, + +	TP_PROTO(struct super_block *sb, bool is_umount, char *msg), + +	TP_ARGS(sb, is_umount, msg), + +	TP_STRUCT__entry( +		__field(dev_t,	dev) +		__field(bool,	is_umount) +		__field(char *,	msg) +	), + +	TP_fast_assign( +		__entry->dev		= sb->s_dev; +		__entry->is_umount	= is_umount; +		__entry->msg		= msg; +	), + +	TP_printk("dev = (%d,%d), checkpoint for %s, state = %s", +		show_dev(__entry), +		__entry->is_umount ? 
"clean umount" : "consistency", +		__entry->msg) +); + +TRACE_EVENT(f2fs_issue_discard, + +	TP_PROTO(struct super_block *sb, block_t blkstart, block_t blklen), + +	TP_ARGS(sb, blkstart, blklen), + +	TP_STRUCT__entry( +		__field(dev_t,	dev) +		__field(block_t, blkstart) +		__field(block_t, blklen) +	), + +	TP_fast_assign( +		__entry->dev	= sb->s_dev; +		__entry->blkstart = blkstart; +		__entry->blklen = blklen; +	), + +	TP_printk("dev = (%d,%d), blkstart = 0x%llx, blklen = 0x%llx", +		show_dev(__entry), +		(unsigned long long)__entry->blkstart, +		(unsigned long long)__entry->blklen) +); +#endif /* _TRACE_F2FS_H */ + + /* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/include/trace/events/filelock.h b/include/trace/events/filelock.h new file mode 100644 index 00000000000..59d11c22f07 --- /dev/null +++ b/include/trace/events/filelock.h @@ -0,0 +1,96 @@ +/* + * Events for filesystem locks + * + * Copyright 2013 Jeff Layton <jlayton@poochiereds.net> + */ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM filelock + +#if !defined(_TRACE_FILELOCK_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_FILELOCK_H + +#include <linux/tracepoint.h> +#include <linux/fs.h> +#include <linux/device.h> +#include <linux/kdev_t.h> + +#define show_fl_flags(val)						\ +	__print_flags(val, "|", 					\ +		{ FL_POSIX,		"FL_POSIX" },			\ +		{ FL_FLOCK,		"FL_FLOCK" },			\ +		{ FL_DELEG,		"FL_DELEG" },			\ +		{ FL_ACCESS,		"FL_ACCESS" },			\ +		{ FL_EXISTS,		"FL_EXISTS" },			\ +		{ FL_LEASE,		"FL_LEASE" },			\ +		{ FL_CLOSE,		"FL_CLOSE" },			\ +		{ FL_SLEEP,		"FL_SLEEP" },			\ +		{ FL_DOWNGRADE_PENDING,	"FL_DOWNGRADE_PENDING" },	\ +		{ FL_UNLOCK_PENDING,	"FL_UNLOCK_PENDING" },		\ +		{ FL_OFDLCK,		"FL_OFDLCK" }) + +#define show_fl_type(val)				\ +	__print_symbolic(val,				\ +			{ F_RDLCK, "F_RDLCK" },		\ +			{ F_WRLCK, "F_WRLCK" },		\ +			{ F_UNLCK, "F_UNLCK" }) + +DECLARE_EVENT_CLASS(filelock_lease, + +	TP_PROTO(struct inode *inode, struct file_lock *fl), + +	
TP_ARGS(inode, fl), + +	TP_STRUCT__entry( +		__field(struct file_lock *, fl) +		__field(unsigned long, i_ino) +		__field(dev_t, s_dev) +		__field(struct file_lock *, fl_next) +		__field(fl_owner_t, fl_owner) +		__field(unsigned int, fl_flags) +		__field(unsigned char, fl_type) +		__field(unsigned long, fl_break_time) +		__field(unsigned long, fl_downgrade_time) +	), + +	TP_fast_assign( +		__entry->fl = fl; +		__entry->s_dev = inode->i_sb->s_dev; +		__entry->i_ino = inode->i_ino; +		__entry->fl_next = fl->fl_next; +		__entry->fl_owner = fl->fl_owner; +		__entry->fl_flags = fl->fl_flags; +		__entry->fl_type = fl->fl_type; +		__entry->fl_break_time = fl->fl_break_time; +		__entry->fl_downgrade_time = fl->fl_downgrade_time; +	), + +	TP_printk("fl=0x%p dev=0x%x:0x%x ino=0x%lx fl_next=0x%p fl_owner=0x%p fl_flags=%s fl_type=%s fl_break_time=%lu fl_downgrade_time=%lu", +		__entry->fl, MAJOR(__entry->s_dev), MINOR(__entry->s_dev), +		__entry->i_ino, __entry->fl_next, __entry->fl_owner, +		show_fl_flags(__entry->fl_flags), +		show_fl_type(__entry->fl_type), +		__entry->fl_break_time, __entry->fl_downgrade_time) +); + +DEFINE_EVENT(filelock_lease, break_lease_noblock, TP_PROTO(struct inode *inode, struct file_lock *fl), +		TP_ARGS(inode, fl)); + +DEFINE_EVENT(filelock_lease, break_lease_block, TP_PROTO(struct inode *inode, struct file_lock *fl), +		TP_ARGS(inode, fl)); + +DEFINE_EVENT(filelock_lease, break_lease_unblock, TP_PROTO(struct inode *inode, struct file_lock *fl), +		TP_ARGS(inode, fl)); + +DEFINE_EVENT(filelock_lease, generic_add_lease, TP_PROTO(struct inode *inode, struct file_lock *fl), +		TP_ARGS(inode, fl)); + +DEFINE_EVENT(filelock_lease, generic_delete_lease, TP_PROTO(struct inode *inode, struct file_lock *fl), +		TP_ARGS(inode, fl)); + +DEFINE_EVENT(filelock_lease, time_out_leases, TP_PROTO(struct inode *inode, struct file_lock *fl), +		TP_ARGS(inode, fl)); + +#endif /* _TRACE_FILELOCK_H */ + +/* This part must be outside protection */ +#include 
<trace/define_trace.h> diff --git a/include/trace/events/filemap.h b/include/trace/events/filemap.h new file mode 100644 index 00000000000..0421f49a20f --- /dev/null +++ b/include/trace/events/filemap.h @@ -0,0 +1,58 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM filemap + +#if !defined(_TRACE_FILEMAP_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_FILEMAP_H + +#include <linux/types.h> +#include <linux/tracepoint.h> +#include <linux/mm.h> +#include <linux/memcontrol.h> +#include <linux/device.h> +#include <linux/kdev_t.h> + +DECLARE_EVENT_CLASS(mm_filemap_op_page_cache, + +	TP_PROTO(struct page *page), + +	TP_ARGS(page), + +	TP_STRUCT__entry( +		__field(struct page *, page) +		__field(unsigned long, i_ino) +		__field(unsigned long, index) +		__field(dev_t, s_dev) +	), + +	TP_fast_assign( +		__entry->page = page; +		__entry->i_ino = page->mapping->host->i_ino; +		__entry->index = page->index; +		if (page->mapping->host->i_sb) +			__entry->s_dev = page->mapping->host->i_sb->s_dev; +		else +			__entry->s_dev = page->mapping->host->i_rdev; +	), + +	TP_printk("dev %d:%d ino %lx page=%p pfn=%lu ofs=%lu", +		MAJOR(__entry->s_dev), MINOR(__entry->s_dev), +		__entry->i_ino, +		__entry->page, +		page_to_pfn(__entry->page), +		__entry->index << PAGE_SHIFT) +); + +DEFINE_EVENT(mm_filemap_op_page_cache, mm_filemap_delete_from_page_cache, +	TP_PROTO(struct page *page), +	TP_ARGS(page) +	); + +DEFINE_EVENT(mm_filemap_op_page_cache, mm_filemap_add_to_page_cache, +	TP_PROTO(struct page *page), +	TP_ARGS(page) +	); + +#endif /* _TRACE_FILEMAP_H */ + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/include/trace/events/gfpflags.h b/include/trace/events/gfpflags.h index e3615c09374..d6fd8e5b14b 100644 --- a/include/trace/events/gfpflags.h +++ b/include/trace/events/gfpflags.h @@ -10,6 +10,7 @@   */  #define show_gfp_flags(flags)						\  	(flags) ? 
__print_flags(flags, "|",				\ +	{(unsigned long)GFP_TRANSHUGE,		"GFP_TRANSHUGE"},	\  	{(unsigned long)GFP_HIGHUSER_MOVABLE,	"GFP_HIGHUSER_MOVABLE"}, \  	{(unsigned long)GFP_HIGHUSER,		"GFP_HIGHUSER"},	\  	{(unsigned long)GFP_USER,		"GFP_USER"},		\ @@ -29,9 +30,13 @@  	{(unsigned long)__GFP_COMP,		"GFP_COMP"},		\  	{(unsigned long)__GFP_ZERO,		"GFP_ZERO"},		\  	{(unsigned long)__GFP_NOMEMALLOC,	"GFP_NOMEMALLOC"},	\ +	{(unsigned long)__GFP_MEMALLOC,		"GFP_MEMALLOC"},	\  	{(unsigned long)__GFP_HARDWALL,		"GFP_HARDWALL"},	\  	{(unsigned long)__GFP_THISNODE,		"GFP_THISNODE"},	\  	{(unsigned long)__GFP_RECLAIMABLE,	"GFP_RECLAIMABLE"},	\ -	{(unsigned long)__GFP_MOVABLE,		"GFP_MOVABLE"}		\ +	{(unsigned long)__GFP_MOVABLE,		"GFP_MOVABLE"},		\ +	{(unsigned long)__GFP_NOTRACK,		"GFP_NOTRACK"},		\ +	{(unsigned long)__GFP_NO_KSWAPD,	"GFP_NO_KSWAPD"},	\ +	{(unsigned long)__GFP_OTHER_NODE,	"GFP_OTHER_NODE"}	\  	) : "GFP_NOWAIT" diff --git a/include/trace/events/gpio.h b/include/trace/events/gpio.h new file mode 100644 index 00000000000..927a8ad9e51 --- /dev/null +++ b/include/trace/events/gpio.h @@ -0,0 +1,56 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM gpio + +#if !defined(_TRACE_GPIO_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_GPIO_H + +#include <linux/tracepoint.h> + +TRACE_EVENT(gpio_direction, + +	TP_PROTO(unsigned gpio, int in, int err), + +	TP_ARGS(gpio, in, err), + +	TP_STRUCT__entry( +		__field(unsigned, gpio) +		__field(int, in) +		__field(int, err) +	), + +	TP_fast_assign( +		__entry->gpio = gpio; +		__entry->in = in; +		__entry->err = err; +	), + +	TP_printk("%u %3s (%d)", __entry->gpio, +		__entry->in ? 
"in" : "out", __entry->err) +); + +TRACE_EVENT(gpio_value, + +	TP_PROTO(unsigned gpio, int get, int value), + +	TP_ARGS(gpio, get, value), + +	TP_STRUCT__entry( +		__field(unsigned, gpio) +		__field(int, get) +		__field(int, value) +	), + +	TP_fast_assign( +		__entry->gpio = gpio; +		__entry->get = get; +		__entry->value = value; +	), + +	TP_printk("%u %3s %d", __entry->gpio, +		__entry->get ? "get" : "set", __entry->value) +); + +#endif /* if !defined(_TRACE_GPIO_H) || defined(TRACE_HEADER_MULTI_READ) */ + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/include/trace/events/host1x.h b/include/trace/events/host1x.h new file mode 100644 index 00000000000..94db6a2c354 --- /dev/null +++ b/include/trace/events/host1x.h @@ -0,0 +1,253 @@ +/* + * include/trace/events/host1x.h + * + * host1x event logging to ftrace. + * + * Copyright (c) 2010-2013, NVIDIA Corporation. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA. 
+ */ + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM host1x + +#if !defined(_TRACE_HOST1X_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_HOST1X_H + +#include <linux/ktime.h> +#include <linux/tracepoint.h> + +DECLARE_EVENT_CLASS(host1x, +	TP_PROTO(const char *name), +	TP_ARGS(name), +	TP_STRUCT__entry(__field(const char *, name)), +	TP_fast_assign(__entry->name = name;), +	TP_printk("name=%s", __entry->name) +); + +DEFINE_EVENT(host1x, host1x_channel_open, +	TP_PROTO(const char *name), +	TP_ARGS(name) +); + +DEFINE_EVENT(host1x, host1x_channel_release, +	TP_PROTO(const char *name), +	TP_ARGS(name) +); + +DEFINE_EVENT(host1x, host1x_cdma_begin, +	TP_PROTO(const char *name), +	TP_ARGS(name) +); + +DEFINE_EVENT(host1x, host1x_cdma_end, +	TP_PROTO(const char *name), +	TP_ARGS(name) +); + +TRACE_EVENT(host1x_cdma_push, +	TP_PROTO(const char *name, u32 op1, u32 op2), + +	TP_ARGS(name, op1, op2), + +	TP_STRUCT__entry( +		__field(const char *, name) +		__field(u32, op1) +		__field(u32, op2) +	), + +	TP_fast_assign( +		__entry->name = name; +		__entry->op1 = op1; +		__entry->op2 = op2; +	), + +	TP_printk("name=%s, op1=%08x, op2=%08x", +		__entry->name, __entry->op1, __entry->op2) +); + +TRACE_EVENT(host1x_cdma_push_gather, +	TP_PROTO(const char *name, u32 mem_id, +			u32 words, u32 offset, void *cmdbuf), + +	TP_ARGS(name, mem_id, words, offset, cmdbuf), + +	TP_STRUCT__entry( +		__field(const char *, name) +		__field(u32, mem_id) +		__field(u32, words) +		__field(u32, offset) +		__field(bool, cmdbuf) +		__dynamic_array(u32, cmdbuf, words) +	), + +	TP_fast_assign( +		if (cmdbuf) { +			memcpy(__get_dynamic_array(cmdbuf), cmdbuf+offset, +					words * sizeof(u32)); +		} +		__entry->cmdbuf = cmdbuf; +		__entry->name = name; +		__entry->mem_id = mem_id; +		__entry->words = words; +		__entry->offset = offset; +	), + +	TP_printk("name=%s, mem_id=%08x, words=%u, offset=%d, contents=[%s]", +	  __entry->name, __entry->mem_id, +	  __entry->words, __entry->offset, +	  
__print_hex(__get_dynamic_array(cmdbuf), +		  __entry->cmdbuf ? __entry->words * 4 : 0)) +); + +TRACE_EVENT(host1x_channel_submit, +	TP_PROTO(const char *name, u32 cmdbufs, u32 relocs, u32 waitchks, +			u32 syncpt_id, u32 syncpt_incrs), + +	TP_ARGS(name, cmdbufs, relocs, waitchks, syncpt_id, syncpt_incrs), + +	TP_STRUCT__entry( +		__field(const char *, name) +		__field(u32, cmdbufs) +		__field(u32, relocs) +		__field(u32, waitchks) +		__field(u32, syncpt_id) +		__field(u32, syncpt_incrs) +	), + +	TP_fast_assign( +		__entry->name = name; +		__entry->cmdbufs = cmdbufs; +		__entry->relocs = relocs; +		__entry->waitchks = waitchks; +		__entry->syncpt_id = syncpt_id; +		__entry->syncpt_incrs = syncpt_incrs; +	), + +	TP_printk("name=%s, cmdbufs=%u, relocs=%u, waitchks=%d," +		"syncpt_id=%u, syncpt_incrs=%u", +	  __entry->name, __entry->cmdbufs, __entry->relocs, __entry->waitchks, +	  __entry->syncpt_id, __entry->syncpt_incrs) +); + +TRACE_EVENT(host1x_channel_submitted, +	TP_PROTO(const char *name, u32 syncpt_base, u32 syncpt_max), + +	TP_ARGS(name, syncpt_base, syncpt_max), + +	TP_STRUCT__entry( +		__field(const char *, name) +		__field(u32, syncpt_base) +		__field(u32, syncpt_max) +	), + +	TP_fast_assign( +		__entry->name = name; +		__entry->syncpt_base = syncpt_base; +		__entry->syncpt_max = syncpt_max; +	), + +	TP_printk("name=%s, syncpt_base=%d, syncpt_max=%d", +		__entry->name, __entry->syncpt_base, __entry->syncpt_max) +); + +TRACE_EVENT(host1x_channel_submit_complete, +	TP_PROTO(const char *name, int count, u32 thresh), + +	TP_ARGS(name, count, thresh), + +	TP_STRUCT__entry( +		__field(const char *, name) +		__field(int, count) +		__field(u32, thresh) +	), + +	TP_fast_assign( +		__entry->name = name; +		__entry->count = count; +		__entry->thresh = thresh; +	), + +	TP_printk("name=%s, count=%d, thresh=%d", +		__entry->name, __entry->count, __entry->thresh) +); + +TRACE_EVENT(host1x_wait_cdma, +	TP_PROTO(const char *name, u32 eventid), + +	TP_ARGS(name, eventid), + 
+	TP_STRUCT__entry( +		__field(const char *, name) +		__field(u32, eventid) +	), + +	TP_fast_assign( +		__entry->name = name; +		__entry->eventid = eventid; +	), + +	TP_printk("name=%s, event=%d", __entry->name, __entry->eventid) +); + +TRACE_EVENT(host1x_syncpt_load_min, +	TP_PROTO(u32 id, u32 val), + +	TP_ARGS(id, val), + +	TP_STRUCT__entry( +		__field(u32, id) +		__field(u32, val) +	), + +	TP_fast_assign( +		__entry->id = id; +		__entry->val = val; +	), + +	TP_printk("id=%d, val=%d", __entry->id, __entry->val) +); + +TRACE_EVENT(host1x_syncpt_wait_check, +	TP_PROTO(void *mem_id, u32 offset, u32 syncpt_id, u32 thresh, u32 min), + +	TP_ARGS(mem_id, offset, syncpt_id, thresh, min), + +	TP_STRUCT__entry( +		__field(void *, mem_id) +		__field(u32, offset) +		__field(u32, syncpt_id) +		__field(u32, thresh) +		__field(u32, min) +	), + +	TP_fast_assign( +		__entry->mem_id = mem_id; +		__entry->offset = offset; +		__entry->syncpt_id = syncpt_id; +		__entry->thresh = thresh; +		__entry->min = min; +	), + +	TP_printk("mem_id=%p, offset=%05x, id=%d, thresh=%d, current=%d", +		__entry->mem_id, __entry->offset, +		__entry->syncpt_id, __entry->thresh, +		__entry->min) +); + +#endif /*  _TRACE_HOST1X_H */ + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/include/trace/events/hswadsp.h b/include/trace/events/hswadsp.h new file mode 100644 index 00000000000..0f78bbb0200 --- /dev/null +++ b/include/trace/events/hswadsp.h @@ -0,0 +1,384 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM hswadsp + +#if !defined(_TRACE_HSWADSP_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_HSWADSP_H + +#include <linux/types.h> +#include <linux/ktime.h> +#include <linux/tracepoint.h> + +struct sst_hsw; +struct sst_hsw_stream; +struct sst_hsw_ipc_stream_free_req; +struct sst_hsw_ipc_volume_req; +struct sst_hsw_ipc_stream_alloc_req; +struct sst_hsw_audio_data_format_ipc; +struct sst_hsw_ipc_stream_info_reply; +struct sst_hsw_ipc_device_config_req; + 
+DECLARE_EVENT_CLASS(sst_irq, + +	TP_PROTO(uint32_t status, uint32_t mask), + +	TP_ARGS(status, mask), + +	TP_STRUCT__entry( +		__field(	unsigned int,	status		) +		__field(	unsigned int,	mask		) +	), + +	TP_fast_assign( +		__entry->status = status; +		__entry->mask = mask; +	), + +	TP_printk("status 0x%8.8x mask 0x%8.8x", +		(unsigned int)__entry->status, (unsigned int)__entry->mask) +); + +DEFINE_EVENT(sst_irq, sst_irq_busy, + +	TP_PROTO(unsigned int status, unsigned int mask), + +	TP_ARGS(status, mask) + +); + +DEFINE_EVENT(sst_irq, sst_irq_done, + +	TP_PROTO(unsigned int status, unsigned int mask), + +	TP_ARGS(status, mask) + +); + +DECLARE_EVENT_CLASS(ipc, + +	TP_PROTO(const char *name, int val), + +	TP_ARGS(name, val), + +	TP_STRUCT__entry( +		__string(	name,	name		) +		__field(	unsigned int,	val	) +	), + +	TP_fast_assign( +		__assign_str(name, name); +		__entry->val = val; +	), + +	TP_printk("%s 0x%8.8x", __get_str(name), (unsigned int)__entry->val) + +); + +DEFINE_EVENT(ipc, ipc_request, + +	TP_PROTO(const char *name, int val), + +	TP_ARGS(name, val) + +); + +DEFINE_EVENT(ipc, ipc_reply, + +	TP_PROTO(const char *name, int val), + +	TP_ARGS(name, val) + +); + +DEFINE_EVENT(ipc, ipc_pending_reply, + +	TP_PROTO(const char *name, int val), + +	TP_ARGS(name, val) + +); + +DEFINE_EVENT(ipc, ipc_notification, + +	TP_PROTO(const char *name, int val), + +	TP_ARGS(name, val) + +); + +DEFINE_EVENT(ipc, ipc_error, + +	TP_PROTO(const char *name, int val), + +	TP_ARGS(name, val) + +); + +DECLARE_EVENT_CLASS(stream_position, + +	TP_PROTO(unsigned int id, unsigned int pos), + +	TP_ARGS(id, pos), + +	TP_STRUCT__entry( +		__field(	unsigned int,	id		) +		__field(	unsigned int,	pos		) +	), + +	TP_fast_assign( +		__entry->id = id; +		__entry->pos = pos; +	), + +	TP_printk("id %d position 0x%x", +		(unsigned int)__entry->id, (unsigned int)__entry->pos) +); + +DEFINE_EVENT(stream_position, stream_read_position, + +	TP_PROTO(unsigned int id, unsigned int pos), + +	TP_ARGS(id, pos) 
+ +); + +DEFINE_EVENT(stream_position, stream_write_position, + +	TP_PROTO(unsigned int id, unsigned int pos), + +	TP_ARGS(id, pos) + +); + +TRACE_EVENT(hsw_stream_buffer, + +	TP_PROTO(struct sst_hsw_stream *stream), + +	TP_ARGS(stream), + +	TP_STRUCT__entry( +		__field(	int,	id	) +		__field(	int,	pt_addr	) +		__field(	int,	num_pages	) +		__field(	int,	ring_size	) +		__field(	int,	ring_offset	) +		__field(	int,	first_pfn	) +	), + +	TP_fast_assign( +		__entry->id = stream->host_id; +		__entry->pt_addr = stream->request.ringinfo.ring_pt_address; +		__entry->num_pages = stream->request.ringinfo.num_pages; +		__entry->ring_size = stream->request.ringinfo.ring_size; +		__entry->ring_offset = stream->request.ringinfo.ring_offset; +		__entry->first_pfn = stream->request.ringinfo.ring_first_pfn; +	), + +	TP_printk("stream %d ring addr 0x%x pages %d size 0x%x offset 0x%x PFN 0x%x", +		(int) __entry->id,  (int)__entry->pt_addr, +		(int)__entry->num_pages, (int)__entry->ring_size, +		(int)__entry->ring_offset, (int)__entry->first_pfn) +); + +TRACE_EVENT(hsw_stream_alloc_reply, + +	TP_PROTO(struct sst_hsw_stream *stream), + +	TP_ARGS(stream), + +	TP_STRUCT__entry( +		__field(	int,	id	) +		__field(	int,	stream_id	) +		__field(	int,	mixer_id	) +		__field(	int,	peak0	) +		__field(	int,	peak1	) +		__field(	int,	vol0	) +		__field(	int,	vol1	) +	), + +	TP_fast_assign( +		__entry->id = stream->host_id; +		__entry->stream_id = stream->reply.stream_hw_id; +		__entry->mixer_id = stream->reply.mixer_hw_id; +		__entry->peak0 = stream->reply.peak_meter_register_address[0]; +		__entry->peak1 = stream->reply.peak_meter_register_address[1]; +		__entry->vol0 = stream->reply.volume_register_address[0]; +		__entry->vol1 = stream->reply.volume_register_address[1]; +	), + +	TP_printk("stream %d hw id %d mixer %d peak 0x%x:0x%x vol 0x%x,0x%x", +		(int) __entry->id, (int) __entry->stream_id, (int)__entry->mixer_id, +		(int)__entry->peak0, (int)__entry->peak1, +		(int)__entry->vol0, 
(int)__entry->vol1) +); + +TRACE_EVENT(hsw_mixer_info_reply, + +	TP_PROTO(struct sst_hsw_ipc_stream_info_reply *reply), + +	TP_ARGS(reply), + +	TP_STRUCT__entry( +		__field(	int,	mixer_id	) +		__field(	int,	peak0	) +		__field(	int,	peak1	) +		__field(	int,	vol0	) +		__field(	int,	vol1	) +	), + +	TP_fast_assign( +		__entry->mixer_id = reply->mixer_hw_id; +		__entry->peak0 = reply->peak_meter_register_address[0]; +		__entry->peak1 = reply->peak_meter_register_address[1]; +		__entry->vol0 = reply->volume_register_address[0]; +		__entry->vol1 = reply->volume_register_address[1]; +	), + +	TP_printk("mixer id %d peak 0x%x:0x%x vol 0x%x,0x%x", +		(int)__entry->mixer_id, +		(int)__entry->peak0, (int)__entry->peak1, +		(int)__entry->vol0, (int)__entry->vol1) +); + +TRACE_EVENT(hsw_stream_data_format, + +	TP_PROTO(struct sst_hsw_stream *stream, +		struct sst_hsw_audio_data_format_ipc *req), + +	TP_ARGS(stream, req), + +	TP_STRUCT__entry( +		__field(	uint32_t,	id	) +		__field(	uint32_t,	frequency	) +		__field(	uint32_t,	bitdepth	) +		__field(	uint32_t,	map	) +		__field(	uint32_t,	config	) +		__field(	uint32_t,	style	) +		__field(	uint8_t,	ch_num	) +		__field(	uint8_t,	valid_bit	) +	), + +	TP_fast_assign( +		__entry->id = stream->host_id; +		__entry->frequency = req->frequency; +		__entry->bitdepth = req->bitdepth; +		__entry->map = req->map; +		__entry->config = req->config; +		__entry->style = req->style; +		__entry->ch_num = req->ch_num; +		__entry->valid_bit = req->valid_bit; +	), + +	TP_printk("stream %d freq %d depth %d map 0x%x config 0x%x style 0x%x ch %d bits %d", +		(int) __entry->id, (uint32_t)__entry->frequency, +		(uint32_t)__entry->bitdepth, (uint32_t)__entry->map, +		(uint32_t)__entry->config, (uint32_t)__entry->style, +		(uint8_t)__entry->ch_num, (uint8_t)__entry->valid_bit) +); + +TRACE_EVENT(hsw_stream_alloc_request, + +	TP_PROTO(struct sst_hsw_stream *stream, +		struct sst_hsw_ipc_stream_alloc_req *req), + +	TP_ARGS(stream, req), + +	TP_STRUCT__entry( +		
__field(	uint32_t,	id	) +		__field(	uint8_t,	path_id	) +		__field(	uint8_t,	stream_type	) +		__field(	uint8_t,	format_id	) +	), + +	TP_fast_assign( +		__entry->id = stream->host_id; +		__entry->path_id = req->path_id; +		__entry->stream_type = req->stream_type; +		__entry->format_id = req->format_id; +	), + +	TP_printk("stream %d path %d type %d format %d", +		(int) __entry->id, (uint8_t)__entry->path_id, +		(uint8_t)__entry->stream_type, (uint8_t)__entry->format_id) +); + +TRACE_EVENT(hsw_stream_free_req, + +	TP_PROTO(struct sst_hsw_stream *stream, +		struct sst_hsw_ipc_stream_free_req *req), + +	TP_ARGS(stream, req), + +	TP_STRUCT__entry( +		__field(	int,	id	) +		__field(	int,	stream_id	) +	), + +	TP_fast_assign( +		__entry->id = stream->host_id; +		__entry->stream_id = req->stream_id; +	), + +	TP_printk("stream %d hw id %d", +		(int) __entry->id, (int) __entry->stream_id) +); + +TRACE_EVENT(hsw_volume_req, + +	TP_PROTO(struct sst_hsw_stream *stream, +		struct sst_hsw_ipc_volume_req *req), + +	TP_ARGS(stream, req), + +	TP_STRUCT__entry( +		__field(	int,	id	) +		__field(	uint32_t,	channel	) +		__field(	uint32_t,	target_volume	) +		__field(	uint64_t,	curve_duration	) +		__field(	uint32_t,	curve_type	) +	), + +	TP_fast_assign( +		__entry->id = stream->host_id; +		__entry->channel = req->channel; +		__entry->target_volume = req->target_volume; +		__entry->curve_duration = req->curve_duration; +		__entry->curve_type = req->curve_type; +	), + +	TP_printk("stream %d chan 0x%x vol %d duration %llu type %d", +		(int) __entry->id, (uint32_t) __entry->channel, +		(uint32_t)__entry->target_volume, +		(uint64_t)__entry->curve_duration, +		(uint32_t)__entry->curve_type) +); + +TRACE_EVENT(hsw_device_config_req, + +	TP_PROTO(struct sst_hsw_ipc_device_config_req *req), + +	TP_ARGS(req), + +	TP_STRUCT__entry( +		__field(	uint32_t,	ssp	) +		__field(	uint32_t,	clock_freq	) +		__field(	uint32_t,	mode	) +		__field(	uint16_t,	clock_divider	) +	), + +	TP_fast_assign( +		__entry->ssp = 
req->ssp_interface; +		__entry->clock_freq = req->clock_frequency; +		__entry->mode = req->mode; +		__entry->clock_divider = req->clock_divider; +	), + +	TP_printk("SSP %d Freq %d mode %d div %d", +		(uint32_t)__entry->ssp, +		(uint32_t)__entry->clock_freq, (uint32_t)__entry->mode, +		(uint32_t)__entry->clock_divider) +); + +#endif /* _TRACE_HSWADSP_H */ + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/include/trace/events/i2c.h b/include/trace/events/i2c.h new file mode 100644 index 00000000000..fe17187df65 --- /dev/null +++ b/include/trace/events/i2c.h @@ -0,0 +1,372 @@ +/* I2C and SMBUS message transfer tracepoints + * + * Copyright (C) 2013 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public Licence + * as published by the Free Software Foundation; either version + * 2 of the Licence, or (at your option) any later version. 
+ */ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM i2c + +#if !defined(_TRACE_I2C_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_I2C_H + +#include <linux/i2c.h> +#include <linux/tracepoint.h> + +/* + * drivers/i2c/i2c-core.c + */ +extern void i2c_transfer_trace_reg(void); +extern void i2c_transfer_trace_unreg(void); + +/* + * __i2c_transfer() write request + */ +TRACE_EVENT_FN(i2c_write, +	       TP_PROTO(const struct i2c_adapter *adap, const struct i2c_msg *msg, +			int num), +	       TP_ARGS(adap, msg, num), +	       TP_STRUCT__entry( +		       __field(int,	adapter_nr		) +		       __field(__u16,	msg_nr			) +		       __field(__u16,	addr			) +		       __field(__u16,	flags			) +		       __field(__u16,	len			) +		       __dynamic_array(__u8, buf, msg->len)	), +	       TP_fast_assign( +		       __entry->adapter_nr = adap->nr; +		       __entry->msg_nr = num; +		       __entry->addr = msg->addr; +		       __entry->flags = msg->flags; +		       __entry->len = msg->len; +		       memcpy(__get_dynamic_array(buf), msg->buf, msg->len); +			      ), +	       TP_printk("i2c-%d #%u a=%03x f=%04x l=%u [%*phD]", +			 __entry->adapter_nr, +			 __entry->msg_nr, +			 __entry->addr, +			 __entry->flags, +			 __entry->len, +			 __entry->len, __get_dynamic_array(buf) +			 ), +	       i2c_transfer_trace_reg, +	       i2c_transfer_trace_unreg); + +/* + * __i2c_transfer() read request + */ +TRACE_EVENT_FN(i2c_read, +	       TP_PROTO(const struct i2c_adapter *adap, const struct i2c_msg *msg, +			int num), +	       TP_ARGS(adap, msg, num), +	       TP_STRUCT__entry( +		       __field(int,	adapter_nr		) +		       __field(__u16,	msg_nr			) +		       __field(__u16,	addr			) +		       __field(__u16,	flags			) +		       __field(__u16,	len			) +				), +	       TP_fast_assign( +		       __entry->adapter_nr = adap->nr; +		       __entry->msg_nr = num; +		       __entry->addr = msg->addr; +		       __entry->flags = msg->flags; +		       __entry->len = msg->len; +			      ), +	       
TP_printk("i2c-%d #%u a=%03x f=%04x l=%u", +			 __entry->adapter_nr, +			 __entry->msg_nr, +			 __entry->addr, +			 __entry->flags, +			 __entry->len +			 ), +	       i2c_transfer_trace_reg, +		       i2c_transfer_trace_unreg); + +/* + * __i2c_transfer() read reply + */ +TRACE_EVENT_FN(i2c_reply, +	       TP_PROTO(const struct i2c_adapter *adap, const struct i2c_msg *msg, +			int num), +	       TP_ARGS(adap, msg, num), +	       TP_STRUCT__entry( +		       __field(int,	adapter_nr		) +		       __field(__u16,	msg_nr			) +		       __field(__u16,	addr			) +		       __field(__u16,	flags			) +		       __field(__u16,	len			) +		       __dynamic_array(__u8, buf, msg->len)	), +	       TP_fast_assign( +		       __entry->adapter_nr = adap->nr; +		       __entry->msg_nr = num; +		       __entry->addr = msg->addr; +		       __entry->flags = msg->flags; +		       __entry->len = msg->len; +		       memcpy(__get_dynamic_array(buf), msg->buf, msg->len); +			      ), +	       TP_printk("i2c-%d #%u a=%03x f=%04x l=%u [%*phD]", +			 __entry->adapter_nr, +			 __entry->msg_nr, +			 __entry->addr, +			 __entry->flags, +			 __entry->len, +			 __entry->len, __get_dynamic_array(buf) +			 ), +	       i2c_transfer_trace_reg, +	       i2c_transfer_trace_unreg); + +/* + * __i2c_transfer() result + */ +TRACE_EVENT_FN(i2c_result, +	       TP_PROTO(const struct i2c_adapter *adap, int num, int ret), +	       TP_ARGS(adap, num, ret), +	       TP_STRUCT__entry( +		       __field(int,	adapter_nr		) +		       __field(__u16,	nr_msgs			) +		       __field(__s16,	ret			) +				), +	       TP_fast_assign( +		       __entry->adapter_nr = adap->nr; +		       __entry->nr_msgs = num; +		       __entry->ret = ret; +			      ), +	       TP_printk("i2c-%d n=%u ret=%d", +			 __entry->adapter_nr, +			 __entry->nr_msgs, +			 __entry->ret +			 ), +	       i2c_transfer_trace_reg, +	       i2c_transfer_trace_unreg); + +/* + * i2c_smbus_xfer() write data or procedure call request + */ +TRACE_EVENT_CONDITION(smbus_write, +	
TP_PROTO(const struct i2c_adapter *adap, +		 u16 addr, unsigned short flags, +		 char read_write, u8 command, int protocol, +		 const union i2c_smbus_data *data), +	TP_ARGS(adap, addr, flags, read_write, command, protocol, data), +	TP_CONDITION(read_write == I2C_SMBUS_WRITE || +		     protocol == I2C_SMBUS_PROC_CALL || +		     protocol == I2C_SMBUS_BLOCK_PROC_CALL), +	TP_STRUCT__entry( +		__field(int,	adapter_nr		) +		__field(__u16,	addr			) +		__field(__u16,	flags			) +		__field(__u8,	command			) +		__field(__u8,	len			) +		__field(__u32,	protocol		) +		__array(__u8, buf, I2C_SMBUS_BLOCK_MAX + 2)	), +	TP_fast_assign( +		__entry->adapter_nr = adap->nr; +		__entry->addr = addr; +		__entry->flags = flags; +		__entry->command = command; +		__entry->protocol = protocol; + +		switch (protocol) { +		case I2C_SMBUS_BYTE_DATA: +			__entry->len = 1; +			goto copy; +		case I2C_SMBUS_WORD_DATA: +		case I2C_SMBUS_PROC_CALL: +			__entry->len = 2; +			goto copy; +		case I2C_SMBUS_BLOCK_DATA: +		case I2C_SMBUS_BLOCK_PROC_CALL: +		case I2C_SMBUS_I2C_BLOCK_DATA: +			__entry->len = data->block[0] + 1; +		copy: +			memcpy(__entry->buf, data->block, __entry->len); +			break; +		case I2C_SMBUS_QUICK: +		case I2C_SMBUS_BYTE: +		case I2C_SMBUS_I2C_BLOCK_BROKEN: +		default: +			__entry->len = 0; +		} +		       ), +	TP_printk("i2c-%d a=%03x f=%04x c=%x %s l=%u [%*phD]", +		  __entry->adapter_nr, +		  __entry->addr, +		  __entry->flags, +		  __entry->command, +		  __print_symbolic(__entry->protocol, +				   { I2C_SMBUS_QUICK,		"QUICK"	}, +				   { I2C_SMBUS_BYTE,		"BYTE"	}, +				   { I2C_SMBUS_BYTE_DATA,		"BYTE_DATA" }, +				   { I2C_SMBUS_WORD_DATA,		"WORD_DATA" }, +				   { I2C_SMBUS_PROC_CALL,		"PROC_CALL" }, +				   { I2C_SMBUS_BLOCK_DATA,		"BLOCK_DATA" }, +				   { I2C_SMBUS_I2C_BLOCK_BROKEN,	"I2C_BLOCK_BROKEN" }, +				   { I2C_SMBUS_BLOCK_PROC_CALL,	"BLOCK_PROC_CALL" }, +				   { I2C_SMBUS_I2C_BLOCK_DATA,	"I2C_BLOCK_DATA" }), +		  __entry->len, +		  __entry->len, __entry->buf +		  )); 
+ +/* + * i2c_smbus_xfer() read data request + */ +TRACE_EVENT_CONDITION(smbus_read, +	TP_PROTO(const struct i2c_adapter *adap, +		 u16 addr, unsigned short flags, +		 char read_write, u8 command, int protocol), +	TP_ARGS(adap, addr, flags, read_write, command, protocol), +	TP_CONDITION(!(read_write == I2C_SMBUS_WRITE || +		       protocol == I2C_SMBUS_PROC_CALL || +		       protocol == I2C_SMBUS_BLOCK_PROC_CALL)), +	TP_STRUCT__entry( +		__field(int,	adapter_nr		) +		__field(__u16,	flags			) +		__field(__u16,	addr			) +		__field(__u8,	command			) +		__field(__u32,	protocol		) +		__array(__u8, buf, I2C_SMBUS_BLOCK_MAX + 2)	), +	TP_fast_assign( +		__entry->adapter_nr = adap->nr; +		__entry->addr = addr; +		__entry->flags = flags; +		__entry->command = command; +		__entry->protocol = protocol; +		       ), +	TP_printk("i2c-%d a=%03x f=%04x c=%x %s", +		  __entry->adapter_nr, +		  __entry->addr, +		  __entry->flags, +		  __entry->command, +		  __print_symbolic(__entry->protocol, +				   { I2C_SMBUS_QUICK,		"QUICK"	}, +				   { I2C_SMBUS_BYTE,		"BYTE"	}, +				   { I2C_SMBUS_BYTE_DATA,		"BYTE_DATA" }, +				   { I2C_SMBUS_WORD_DATA,		"WORD_DATA" }, +				   { I2C_SMBUS_PROC_CALL,		"PROC_CALL" }, +				   { I2C_SMBUS_BLOCK_DATA,		"BLOCK_DATA" }, +				   { I2C_SMBUS_I2C_BLOCK_BROKEN,	"I2C_BLOCK_BROKEN" }, +				   { I2C_SMBUS_BLOCK_PROC_CALL,	"BLOCK_PROC_CALL" }, +				   { I2C_SMBUS_I2C_BLOCK_DATA,	"I2C_BLOCK_DATA" }) +		  )); + +/* + * i2c_smbus_xfer() read data or procedure call reply + */ +TRACE_EVENT_CONDITION(smbus_reply, +	TP_PROTO(const struct i2c_adapter *adap, +		 u16 addr, unsigned short flags, +		 char read_write, u8 command, int protocol, +		 const union i2c_smbus_data *data), +	TP_ARGS(adap, addr, flags, read_write, command, protocol, data), +	TP_CONDITION(read_write == I2C_SMBUS_READ), +	TP_STRUCT__entry( +		__field(int,	adapter_nr		) +		__field(__u16,	addr			) +		__field(__u16,	flags			) +		__field(__u8,	command			) +		__field(__u8,	len			) +		__field(__u32,	
protocol		) +		__array(__u8, buf, I2C_SMBUS_BLOCK_MAX + 2)	), +	TP_fast_assign( +		__entry->adapter_nr = adap->nr; +		__entry->addr = addr; +		__entry->flags = flags; +		__entry->command = command; +		__entry->protocol = protocol; + +		switch (protocol) { +		case I2C_SMBUS_BYTE: +		case I2C_SMBUS_BYTE_DATA: +			__entry->len = 1; +			goto copy; +		case I2C_SMBUS_WORD_DATA: +		case I2C_SMBUS_PROC_CALL: +			__entry->len = 2; +			goto copy; +		case I2C_SMBUS_BLOCK_DATA: +		case I2C_SMBUS_BLOCK_PROC_CALL: +		case I2C_SMBUS_I2C_BLOCK_DATA: +			__entry->len = data->block[0] + 1; +		copy: +			memcpy(__entry->buf, data->block, __entry->len); +			break; +		case I2C_SMBUS_QUICK: +		case I2C_SMBUS_I2C_BLOCK_BROKEN: +		default: +			__entry->len = 0; +		} +		       ), +	TP_printk("i2c-%d a=%03x f=%04x c=%x %s l=%u [%*phD]", +		  __entry->adapter_nr, +		  __entry->addr, +		  __entry->flags, +		  __entry->command, +		  __print_symbolic(__entry->protocol, +				   { I2C_SMBUS_QUICK,		"QUICK"	}, +				   { I2C_SMBUS_BYTE,		"BYTE"	}, +				   { I2C_SMBUS_BYTE_DATA,		"BYTE_DATA" }, +				   { I2C_SMBUS_WORD_DATA,		"WORD_DATA" }, +				   { I2C_SMBUS_PROC_CALL,		"PROC_CALL" }, +				   { I2C_SMBUS_BLOCK_DATA,		"BLOCK_DATA" }, +				   { I2C_SMBUS_I2C_BLOCK_BROKEN,	"I2C_BLOCK_BROKEN" }, +				   { I2C_SMBUS_BLOCK_PROC_CALL,	"BLOCK_PROC_CALL" }, +				   { I2C_SMBUS_I2C_BLOCK_DATA,	"I2C_BLOCK_DATA" }), +		  __entry->len, +		  __entry->len, __entry->buf +		  )); + +/* + * i2c_smbus_xfer() result + */ +TRACE_EVENT(smbus_result, +	    TP_PROTO(const struct i2c_adapter *adap, +		     u16 addr, unsigned short flags, +		     char read_write, u8 command, int protocol, +		     int res), +	    TP_ARGS(adap, addr, flags, read_write, command, protocol, res), +	    TP_STRUCT__entry( +		    __field(int,	adapter_nr		) +		    __field(__u16,	addr			) +		    __field(__u16,	flags			) +		    __field(__u8,	read_write		) +		    __field(__u8,	command			) +		    __field(__s16,	res			) +		    __field(__u32,	protocol		) 
+			     ), +	    TP_fast_assign( +		    __entry->adapter_nr = adap->nr; +		    __entry->addr = addr; +		    __entry->flags = flags; +		    __entry->read_write = read_write; +		    __entry->command = command; +		    __entry->protocol = protocol; +		    __entry->res = res; +			   ), +	    TP_printk("i2c-%d a=%03x f=%04x c=%x %s %s res=%d", +		      __entry->adapter_nr, +		      __entry->addr, +		      __entry->flags, +		      __entry->command, +		      __print_symbolic(__entry->protocol, +				       { I2C_SMBUS_QUICK,		"QUICK"	}, +				       { I2C_SMBUS_BYTE,		"BYTE"	}, +				       { I2C_SMBUS_BYTE_DATA,		"BYTE_DATA" }, +				       { I2C_SMBUS_WORD_DATA,		"WORD_DATA" }, +				       { I2C_SMBUS_PROC_CALL,		"PROC_CALL" }, +				       { I2C_SMBUS_BLOCK_DATA,		"BLOCK_DATA" }, +				       { I2C_SMBUS_I2C_BLOCK_BROKEN,	"I2C_BLOCK_BROKEN" }, +				       { I2C_SMBUS_BLOCK_PROC_CALL,	"BLOCK_PROC_CALL" }, +				       { I2C_SMBUS_I2C_BLOCK_DATA,	"I2C_BLOCK_DATA" }), +		      __entry->read_write == I2C_SMBUS_WRITE ? 
"wr" : "rd", +		      __entry->res +		      )); + +#endif /* _TRACE_I2C_H */ + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/include/trace/events/intel-sst.h b/include/trace/events/intel-sst.h new file mode 100644 index 00000000000..76c72d3f190 --- /dev/null +++ b/include/trace/events/intel-sst.h @@ -0,0 +1,148 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM intel-sst + +#if !defined(_TRACE_INTEL_SST_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_INTEL_SST_H + +#include <linux/types.h> +#include <linux/ktime.h> +#include <linux/tracepoint.h> + +DECLARE_EVENT_CLASS(sst_ipc_msg, + +	TP_PROTO(unsigned int val), + +	TP_ARGS(val), + +	TP_STRUCT__entry( +		__field(	unsigned int,	val		) +	), + +	TP_fast_assign( +		__entry->val = val; +	), + +	TP_printk("0x%8.8x", (unsigned int)__entry->val) +); + +DEFINE_EVENT(sst_ipc_msg, sst_ipc_msg_tx, + +	TP_PROTO(unsigned int val), + +	TP_ARGS(val) + +); + +DEFINE_EVENT(sst_ipc_msg, sst_ipc_msg_rx, + +	TP_PROTO(unsigned int val), + +	TP_ARGS(val) + +); + +DECLARE_EVENT_CLASS(sst_ipc_mailbox, + +	TP_PROTO(unsigned int offset, unsigned int val), + +	TP_ARGS(offset, val), + +	TP_STRUCT__entry( +		__field(	unsigned int,	offset		) +		__field(	unsigned int,	val		) +	), + +	TP_fast_assign( +		__entry->offset = offset; +		__entry->val = val; +	), + +	TP_printk(" 0x%4.4x = 0x%8.8x", +		(unsigned int)__entry->offset, (unsigned int)__entry->val) +); + +DEFINE_EVENT(sst_ipc_mailbox, sst_ipc_inbox_rdata, + +	TP_PROTO(unsigned int offset, unsigned int val), + +	TP_ARGS(offset, val) + +); + +DEFINE_EVENT(sst_ipc_mailbox, sst_ipc_inbox_wdata, + +	TP_PROTO(unsigned int offset, unsigned int val), + +	TP_ARGS(offset, val) + +); + +DEFINE_EVENT(sst_ipc_mailbox, sst_ipc_outbox_rdata, + +	TP_PROTO(unsigned int offset, unsigned int val), + +	TP_ARGS(offset, val) + +); + +DEFINE_EVENT(sst_ipc_mailbox, sst_ipc_outbox_wdata, + +	TP_PROTO(unsigned int offset, unsigned int val), + +	TP_ARGS(offset, val) + +); + 
+DECLARE_EVENT_CLASS(sst_ipc_mailbox_info, +
+	TP_PROTO(unsigned int size), +
+	TP_ARGS(size), +
+	TP_STRUCT__entry( +		__field(	unsigned int,	size		) +	), +
+	TP_fast_assign( +		__entry->size = size; +	), +
+	TP_printk("Mailbox bytes 0x%8.8x", (unsigned int)__entry->size) +); +
+DEFINE_EVENT(sst_ipc_mailbox_info, sst_ipc_inbox_read, +
+	TP_PROTO(unsigned int size), +
+	TP_ARGS(size) +
+); +
+DEFINE_EVENT(sst_ipc_mailbox_info, sst_ipc_inbox_write, +
+	TP_PROTO(unsigned int size), +
+	TP_ARGS(size) +
+); +
+DEFINE_EVENT(sst_ipc_mailbox_info, sst_ipc_outbox_read, +
+	TP_PROTO(unsigned int size), +
+	TP_ARGS(size) +
+); +
+DEFINE_EVENT(sst_ipc_mailbox_info, sst_ipc_outbox_write, +
+	TP_PROTO(unsigned int size), +
+	TP_ARGS(size) +
+); +
+#endif /* _TRACE_INTEL_SST_H */ +
+/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/include/trace/events/iommu.h b/include/trace/events/iommu.h new file mode 100644 index 00000000000..a8f5c32d174 --- /dev/null +++ b/include/trace/events/iommu.h @@ -0,0 +1,162 @@ +/* + * iommu trace points + * + * Copyright (C) 2013 Shuah Khan <shuah.kh@samsung.com> + * + */ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM iommu + +#if !defined(_TRACE_IOMMU_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_IOMMU_H + +#include <linux/tracepoint.h> +#include <linux/pci.h> + +struct device; + +DECLARE_EVENT_CLASS(iommu_group_event, + +	TP_PROTO(int group_id, struct device *dev), + +	TP_ARGS(group_id, dev), + +	TP_STRUCT__entry( +		__field(int, gid) +		__string(device, dev_name(dev)) +	), + +	TP_fast_assign( +		__entry->gid = group_id; +		__assign_str(device, dev_name(dev)); +	), + +	TP_printk("IOMMU: groupID=%d device=%s", +			__entry->gid, __get_str(device) +	) +); + +DEFINE_EVENT(iommu_group_event, add_device_to_group, + +	TP_PROTO(int group_id, struct device *dev), + +	TP_ARGS(group_id, dev) + +); + +DEFINE_EVENT(iommu_group_event, remove_device_from_group, + +	TP_PROTO(int group_id, struct device *dev), + +	
TP_ARGS(group_id, dev) +); + +DECLARE_EVENT_CLASS(iommu_device_event, + +	TP_PROTO(struct device *dev), + +	TP_ARGS(dev), + +	TP_STRUCT__entry( +		__string(device, dev_name(dev)) +	), + +	TP_fast_assign( +		__assign_str(device, dev_name(dev)); +	), + +	TP_printk("IOMMU: device=%s", __get_str(device) +	) +); + +DEFINE_EVENT(iommu_device_event, attach_device_to_domain, + +	TP_PROTO(struct device *dev), + +	TP_ARGS(dev) +); + +DEFINE_EVENT(iommu_device_event, detach_device_from_domain, + +	TP_PROTO(struct device *dev), + +	TP_ARGS(dev) +); + +DECLARE_EVENT_CLASS(iommu_map_unmap, + +	TP_PROTO(unsigned long iova, phys_addr_t paddr, size_t size), + +	TP_ARGS(iova, paddr, size), + +	TP_STRUCT__entry( +		__field(u64, iova) +		__field(u64, paddr) +		__field(int, size) +	), + +	TP_fast_assign( +		__entry->iova = iova; +		__entry->paddr = paddr; +		__entry->size = size; +	), + +	TP_printk("IOMMU: iova=0x%016llx paddr=0x%016llx size=0x%x", +			__entry->iova, __entry->paddr, __entry->size +	) +); + +DEFINE_EVENT(iommu_map_unmap, map, + +	TP_PROTO(unsigned long iova, phys_addr_t paddr, size_t size), + +	TP_ARGS(iova, paddr, size) +); + +DEFINE_EVENT_PRINT(iommu_map_unmap, unmap, + +	TP_PROTO(unsigned long iova, phys_addr_t paddr, size_t size), + +	TP_ARGS(iova, paddr, size), + +	TP_printk("IOMMU: iova=0x%016llx size=0x%x", +			__entry->iova, __entry->size +	) +); + +DECLARE_EVENT_CLASS(iommu_error, + +	TP_PROTO(struct device *dev, unsigned long iova, int flags), + +	TP_ARGS(dev, iova, flags), + +	TP_STRUCT__entry( +		__string(device, dev_name(dev)) +		__string(driver, dev_driver_string(dev)) +		__field(u64, iova) +		__field(int, flags) +	), + +	TP_fast_assign( +		__assign_str(device, dev_name(dev)); +		__assign_str(driver, dev_driver_string(dev)); +		__entry->iova = iova; +		__entry->flags = flags; +	), + +	TP_printk("IOMMU:%s %s iova=0x%016llx flags=0x%04x", +			__get_str(driver), __get_str(device), +			__entry->iova, __entry->flags +	) +); + +DEFINE_EVENT(iommu_error, 
io_page_fault, + +	TP_PROTO(struct device *dev, unsigned long iova, int flags), + +	TP_ARGS(dev, iova, flags) +); +#endif /* _TRACE_IOMMU_H */ + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/include/trace/events/jbd.h b/include/trace/events/jbd.h new file mode 100644 index 00000000000..da6f2591c25 --- /dev/null +++ b/include/trace/events/jbd.h @@ -0,0 +1,194 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM jbd + +#if !defined(_TRACE_JBD_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_JBD_H + +#include <linux/jbd.h> +#include <linux/tracepoint.h> + +TRACE_EVENT(jbd_checkpoint, + +	TP_PROTO(journal_t *journal, int result), + +	TP_ARGS(journal, result), + +	TP_STRUCT__entry( +		__field(	dev_t,	dev			) +		__field(	int,	result			) +	), + +	TP_fast_assign( +		__entry->dev		= journal->j_fs_dev->bd_dev; +		__entry->result		= result; +	), + +	TP_printk("dev %d,%d result %d", +		  MAJOR(__entry->dev), MINOR(__entry->dev), +		  __entry->result) +); + +DECLARE_EVENT_CLASS(jbd_commit, + +	TP_PROTO(journal_t *journal, transaction_t *commit_transaction), + +	TP_ARGS(journal, commit_transaction), + +	TP_STRUCT__entry( +		__field(	dev_t,	dev			) +		__field(	int,	transaction		) +	), + +	TP_fast_assign( +		__entry->dev		= journal->j_fs_dev->bd_dev; +		__entry->transaction	= commit_transaction->t_tid; +	), + +	TP_printk("dev %d,%d transaction %d", +		  MAJOR(__entry->dev), MINOR(__entry->dev), +		  __entry->transaction) +); + +DEFINE_EVENT(jbd_commit, jbd_start_commit, + +	TP_PROTO(journal_t *journal, transaction_t *commit_transaction), + +	TP_ARGS(journal, commit_transaction) +); + +DEFINE_EVENT(jbd_commit, jbd_commit_locking, + +	TP_PROTO(journal_t *journal, transaction_t *commit_transaction), + +	TP_ARGS(journal, commit_transaction) +); + +DEFINE_EVENT(jbd_commit, jbd_commit_flushing, + +	TP_PROTO(journal_t *journal, transaction_t *commit_transaction), + +	TP_ARGS(journal, commit_transaction) +); + +DEFINE_EVENT(jbd_commit, 
jbd_commit_logging, + +	TP_PROTO(journal_t *journal, transaction_t *commit_transaction), + +	TP_ARGS(journal, commit_transaction) +); + +TRACE_EVENT(jbd_drop_transaction, + +	TP_PROTO(journal_t *journal, transaction_t *commit_transaction), + +	TP_ARGS(journal, commit_transaction), + +	TP_STRUCT__entry( +		__field(	dev_t,	dev			) +		__field(	int,	transaction		) +	), + +	TP_fast_assign( +		__entry->dev		= journal->j_fs_dev->bd_dev; +		__entry->transaction	= commit_transaction->t_tid; +	), + +	TP_printk("dev %d,%d transaction %d", +		  MAJOR(__entry->dev), MINOR(__entry->dev), +		  __entry->transaction) +); + +TRACE_EVENT(jbd_end_commit, +	TP_PROTO(journal_t *journal, transaction_t *commit_transaction), + +	TP_ARGS(journal, commit_transaction), + +	TP_STRUCT__entry( +		__field(	dev_t,	dev			) +		__field(	int,	transaction		) +		__field(	int,	head			) +	), + +	TP_fast_assign( +		__entry->dev		= journal->j_fs_dev->bd_dev; +		__entry->transaction	= commit_transaction->t_tid; +		__entry->head		= journal->j_tail_sequence; +	), + +	TP_printk("dev %d,%d transaction %d head %d", +		  MAJOR(__entry->dev), MINOR(__entry->dev), +		  __entry->transaction, __entry->head) +); + +TRACE_EVENT(jbd_do_submit_data, +	TP_PROTO(journal_t *journal, transaction_t *commit_transaction), + +	TP_ARGS(journal, commit_transaction), + +	TP_STRUCT__entry( +		__field(	dev_t,	dev			) +		__field(	int,	transaction		) +	), + +	TP_fast_assign( +		__entry->dev		= journal->j_fs_dev->bd_dev; +		__entry->transaction	= commit_transaction->t_tid; +	), + +	TP_printk("dev %d,%d transaction %d", +		  MAJOR(__entry->dev), MINOR(__entry->dev), +		   __entry->transaction) +); + +TRACE_EVENT(jbd_cleanup_journal_tail, + +	TP_PROTO(journal_t *journal, tid_t first_tid, +		 unsigned long block_nr, unsigned long freed), + +	TP_ARGS(journal, first_tid, block_nr, freed), + +	TP_STRUCT__entry( +		__field(	dev_t,	dev			) +		__field(	tid_t,	tail_sequence		) +		__field(	tid_t,	first_tid		) +		__field(unsigned long,	block_nr		) +	
	__field(unsigned long,	freed			) +	), + +	TP_fast_assign( +		__entry->dev		= journal->j_fs_dev->bd_dev; +		__entry->tail_sequence	= journal->j_tail_sequence; +		__entry->first_tid	= first_tid; +		__entry->block_nr	= block_nr; +		__entry->freed		= freed; +	), + +	TP_printk("dev %d,%d from %u to %u offset %lu freed %lu", +		  MAJOR(__entry->dev), MINOR(__entry->dev), +		  __entry->tail_sequence, __entry->first_tid, +		  __entry->block_nr, __entry->freed) +); + +TRACE_EVENT(journal_write_superblock, +	TP_PROTO(journal_t *journal, int write_op), + +	TP_ARGS(journal, write_op), + +	TP_STRUCT__entry( +		__field(	dev_t,	dev			) +		__field(	int,	write_op		) +	), + +	TP_fast_assign( +		__entry->dev		= journal->j_fs_dev->bd_dev; +		__entry->write_op	= write_op; +	), + +	TP_printk("dev %d,%d write_op %x", MAJOR(__entry->dev), +		  MINOR(__entry->dev), __entry->write_op) +); + +#endif /* _TRACE_JBD_H */ + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/include/trace/events/jbd2.h b/include/trace/events/jbd2.h index 7447ea9305b..c1d1f3eb242 100644 --- a/include/trace/events/jbd2.h +++ b/include/trace/events/jbd2.h @@ -17,19 +17,17 @@ TRACE_EVENT(jbd2_checkpoint,  	TP_ARGS(journal, result),  	TP_STRUCT__entry( -		__field(	int,	dev_major		) -		__field(	int,	dev_minor		) +		__field(	dev_t,	dev			)  		__field(	int,	result			)  	),  	TP_fast_assign( -		__entry->dev_major	= MAJOR(journal->j_fs_dev->bd_dev); -		__entry->dev_minor	= MINOR(journal->j_fs_dev->bd_dev); +		__entry->dev		= journal->j_fs_dev->bd_dev;  		__entry->result		= result;  	),  	TP_printk("dev %d,%d result %d", -		  __entry->dev_major, __entry->dev_minor, __entry->result) +		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->result)  );  DECLARE_EVENT_CLASS(jbd2_commit, @@ -39,21 +37,19 @@ DECLARE_EVENT_CLASS(jbd2_commit,  	TP_ARGS(journal, commit_transaction),  	TP_STRUCT__entry( -		__field(	int,   dev_major                ) -		__field(	int,   dev_minor                ) +	
	__field(	dev_t,	dev			)  		__field(	char,	sync_commit		  )  		__field(	int,	transaction		  )  	),  	TP_fast_assign( -		__entry->dev_major	= MAJOR(journal->j_fs_dev->bd_dev); -		__entry->dev_minor	= MINOR(journal->j_fs_dev->bd_dev); +		__entry->dev		= journal->j_fs_dev->bd_dev;  		__entry->sync_commit = commit_transaction->t_synchronous_commit;  		__entry->transaction	= commit_transaction->t_tid;  	),  	TP_printk("dev %d,%d transaction %d sync %d", -		  __entry->dev_major, __entry->dev_minor, +		  MAJOR(__entry->dev), MINOR(__entry->dev),  		  __entry->transaction, __entry->sync_commit)  ); @@ -85,29 +81,34 @@ DEFINE_EVENT(jbd2_commit, jbd2_commit_logging,  	TP_ARGS(journal, commit_transaction)  ); +DEFINE_EVENT(jbd2_commit, jbd2_drop_transaction, + +	TP_PROTO(journal_t *journal, transaction_t *commit_transaction), + +	TP_ARGS(journal, commit_transaction) +); +  TRACE_EVENT(jbd2_end_commit,  	TP_PROTO(journal_t *journal, transaction_t *commit_transaction),  	TP_ARGS(journal, commit_transaction),  	TP_STRUCT__entry( -		__field(	int,   dev_major                ) -		__field(	int,   dev_minor                ) +		__field(	dev_t,	dev			)  		__field(	char,	sync_commit		  )  		__field(	int,	transaction		  )  		__field(	int,	head		  	  )  	),  	TP_fast_assign( -		__entry->dev_major	= MAJOR(journal->j_fs_dev->bd_dev); -		__entry->dev_minor	= MINOR(journal->j_fs_dev->bd_dev); +		__entry->dev		= journal->j_fs_dev->bd_dev;  		__entry->sync_commit = commit_transaction->t_synchronous_commit;  		__entry->transaction	= commit_transaction->t_tid;  		__entry->head		= journal->j_tail_sequence;  	),  	TP_printk("dev %d,%d transaction %d sync %d head %d", -		  __entry->dev_major, __entry->dev_minor, +		  MAJOR(__entry->dev), MINOR(__entry->dev),  		  __entry->transaction, __entry->sync_commit, __entry->head)  ); @@ -117,22 +118,118 @@ TRACE_EVENT(jbd2_submit_inode_data,  	TP_ARGS(inode),  	TP_STRUCT__entry( -		__field(	int,   dev_major                ) -		__field(	int,   dev_minor       
         ) +		__field(	dev_t,	dev			)  		__field(	ino_t,	ino			)  	),  	TP_fast_assign( -		__entry->dev_major = MAJOR(inode->i_sb->s_dev); -		__entry->dev_minor = MINOR(inode->i_sb->s_dev); +		__entry->dev	= inode->i_sb->s_dev;  		__entry->ino	= inode->i_ino;  	),  	TP_printk("dev %d,%d ino %lu", -		  __entry->dev_major, __entry->dev_minor, +		  MAJOR(__entry->dev), MINOR(__entry->dev),  		  (unsigned long) __entry->ino)  ); +TRACE_EVENT(jbd2_handle_start, +	TP_PROTO(dev_t dev, unsigned long tid, unsigned int type, +		 unsigned int line_no, int requested_blocks), + +	TP_ARGS(dev, tid, type, line_no, requested_blocks), + +	TP_STRUCT__entry( +		__field(		dev_t,	dev		) +		__field(	unsigned long,	tid		) +		__field(	 unsigned int,	type		) +		__field(	 unsigned int,	line_no		) +		__field(		  int,	requested_blocks) +	), + +	TP_fast_assign( +		__entry->dev		  = dev; +		__entry->tid		  = tid; +		__entry->type		  = type; +		__entry->line_no	  = line_no; +		__entry->requested_blocks = requested_blocks; +	), + +	TP_printk("dev %d,%d tid %lu type %u line_no %u " +		  "requested_blocks %d", +		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->tid, +		  __entry->type, __entry->line_no, __entry->requested_blocks) +); + +TRACE_EVENT(jbd2_handle_extend, +	TP_PROTO(dev_t dev, unsigned long tid, unsigned int type, +		 unsigned int line_no, int buffer_credits, +		 int requested_blocks), + +	TP_ARGS(dev, tid, type, line_no, buffer_credits, requested_blocks), + +	TP_STRUCT__entry( +		__field(		dev_t,	dev		) +		__field(	unsigned long,	tid		) +		__field(	 unsigned int,	type		) +		__field(	 unsigned int,	line_no		) +		__field(		  int,	buffer_credits  ) +		__field(		  int,	requested_blocks) +	), + +	TP_fast_assign( +		__entry->dev		  = dev; +		__entry->tid		  = tid; +		__entry->type		  = type; +		__entry->line_no	  = line_no; +		__entry->buffer_credits   = buffer_credits; +		__entry->requested_blocks = requested_blocks; +	), + +	TP_printk("dev %d,%d tid %lu type %u line_no %u " +		  
"buffer_credits %d requested_blocks %d", +		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->tid, +		  __entry->type, __entry->line_no, __entry->buffer_credits, +		  __entry->requested_blocks) +); + +TRACE_EVENT(jbd2_handle_stats, +	TP_PROTO(dev_t dev, unsigned long tid, unsigned int type, +		 unsigned int line_no, int interval, int sync, +		 int requested_blocks, int dirtied_blocks), + +	TP_ARGS(dev, tid, type, line_no, interval, sync, +		requested_blocks, dirtied_blocks), + +	TP_STRUCT__entry( +		__field(		dev_t,	dev		) +		__field(	unsigned long,	tid		) +		__field(	 unsigned int,	type		) +		__field(	 unsigned int,	line_no		) +		__field(		  int,	interval	) +		__field(		  int,	sync		) +		__field(		  int,	requested_blocks) +		__field(		  int,	dirtied_blocks	) +	), + +	TP_fast_assign( +		__entry->dev		  = dev; +		__entry->tid		  = tid; +		__entry->type		  = type; +		__entry->line_no	  = line_no; +		__entry->interval	  = interval; +		__entry->sync		  = sync; +		__entry->requested_blocks = requested_blocks; +		__entry->dirtied_blocks	  = dirtied_blocks; +	), + +	TP_printk("dev %d,%d tid %lu type %u line_no %u interval %d " +		  "sync %d requested_blocks %d dirtied_blocks %d", +		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->tid, +		  __entry->type, __entry->line_no, __entry->interval, +		  __entry->sync, __entry->requested_blocks, +		  __entry->dirtied_blocks) +); +  TRACE_EVENT(jbd2_run_stats,  	TP_PROTO(dev_t dev, unsigned long tid,  		 struct transaction_run_stats_s *stats), @@ -140,10 +237,10 @@ TRACE_EVENT(jbd2_run_stats,  	TP_ARGS(dev, tid, stats),  	TP_STRUCT__entry( -		__field(		  int,	dev_major	) -		__field(		  int,	dev_minor	) +		__field(		dev_t,	dev		)  		__field(	unsigned long,	tid		)  		__field(	unsigned long,	wait		) +		__field(	unsigned long,	request_delay	)  		__field(	unsigned long,	running		)  		__field(	unsigned long,	locked		)  		__field(	unsigned long,	flushing	) @@ -154,10 +251,10 @@ TRACE_EVENT(jbd2_run_stats,  	),  	TP_fast_assign( -		
__entry->dev_major	= MAJOR(dev); -		__entry->dev_minor	= MINOR(dev); +		__entry->dev		= dev;  		__entry->tid		= tid;  		__entry->wait		= stats->rs_wait; +		__entry->request_delay	= stats->rs_request_delay;  		__entry->running	= stats->rs_running;  		__entry->locked		= stats->rs_locked;  		__entry->flushing	= stats->rs_flushing; @@ -167,10 +264,12 @@ TRACE_EVENT(jbd2_run_stats,  		__entry->blocks_logged	= stats->rs_blocks_logged;  	), -	TP_printk("dev %d,%d tid %lu wait %u running %u locked %u flushing %u " -		  "logging %u handle_count %u blocks %u blocks_logged %u", -		  __entry->dev_major, __entry->dev_minor, __entry->tid, +	TP_printk("dev %d,%d tid %lu wait %u request_delay %u running %u " +		  "locked %u flushing %u logging %u handle_count %u " +		  "blocks %u blocks_logged %u", +		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->tid,  		  jiffies_to_msecs(__entry->wait), +		  jiffies_to_msecs(__entry->request_delay),  		  jiffies_to_msecs(__entry->running),  		  jiffies_to_msecs(__entry->locked),  		  jiffies_to_msecs(__entry->flushing), @@ -186,8 +285,7 @@ TRACE_EVENT(jbd2_checkpoint_stats,  	TP_ARGS(dev, tid, stats),  	TP_STRUCT__entry( -		__field(		  int,	dev_major	) -		__field(		  int,	dev_minor	) +		__field(		dev_t,	dev		)  		__field(	unsigned long,	tid		)  		__field(	unsigned long,	chp_time	)  		__field(		__u32,	forced_to_close	) @@ -196,8 +294,7 @@ TRACE_EVENT(jbd2_checkpoint_stats,  	),  	TP_fast_assign( -		__entry->dev_major	= MAJOR(dev); -		__entry->dev_minor	= MINOR(dev); +		__entry->dev		= dev;  		__entry->tid		= tid;  		__entry->chp_time	= stats->cs_chp_time;  		__entry->forced_to_close= stats->cs_forced_to_close; @@ -207,12 +304,12 @@ TRACE_EVENT(jbd2_checkpoint_stats,  	TP_printk("dev %d,%d tid %lu chp_time %u forced_to_close %u "  		  "written %u dropped %u", -		  __entry->dev_major, __entry->dev_minor, __entry->tid, +		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->tid,  		  jiffies_to_msecs(__entry->chp_time),  		  
__entry->forced_to_close, __entry->written, __entry->dropped)  ); -TRACE_EVENT(jbd2_cleanup_journal_tail, +TRACE_EVENT(jbd2_update_log_tail,  	TP_PROTO(journal_t *journal, tid_t first_tid,  		 unsigned long block_nr, unsigned long freed), @@ -220,8 +317,7 @@ TRACE_EVENT(jbd2_cleanup_journal_tail,  	TP_ARGS(journal, first_tid, block_nr, freed),  	TP_STRUCT__entry( -		__field(	int,   dev_major                ) -		__field(	int,   dev_minor                ) +		__field(	dev_t,	dev			)  		__field(	tid_t,	tail_sequence		)  		__field(	tid_t,	first_tid		)  		__field(unsigned long,	block_nr		) @@ -229,8 +325,7 @@ TRACE_EVENT(jbd2_cleanup_journal_tail,  	),  	TP_fast_assign( -		__entry->dev_major	= MAJOR(journal->j_fs_dev->bd_dev); -		__entry->dev_minor	= MINOR(journal->j_fs_dev->bd_dev); +		__entry->dev		= journal->j_fs_dev->bd_dev;  		__entry->tail_sequence	= journal->j_tail_sequence;  		__entry->first_tid	= first_tid;  		__entry->block_nr	= block_nr; @@ -238,11 +333,52 @@ TRACE_EVENT(jbd2_cleanup_journal_tail,  	),  	TP_printk("dev %d,%d from %u to %u offset %lu freed %lu", -		  __entry->dev_major, __entry->dev_minor, +		  MAJOR(__entry->dev), MINOR(__entry->dev),  		  __entry->tail_sequence, __entry->first_tid,  		  __entry->block_nr, __entry->freed)  ); +TRACE_EVENT(jbd2_write_superblock, + +	TP_PROTO(journal_t *journal, int write_op), + +	TP_ARGS(journal, write_op), + +	TP_STRUCT__entry( +		__field(	dev_t,  dev			) +		__field(	  int,  write_op		) +	), + +	TP_fast_assign( +		__entry->dev		= journal->j_fs_dev->bd_dev; +		__entry->write_op	= write_op; +	), + +	TP_printk("dev %d,%d write_op %x", MAJOR(__entry->dev), +		  MINOR(__entry->dev), __entry->write_op) +); + +TRACE_EVENT(jbd2_lock_buffer_stall, + +	TP_PROTO(dev_t dev, unsigned long stall_ms), + +	TP_ARGS(dev, stall_ms), + +	TP_STRUCT__entry( +		__field(        dev_t, dev	) +		__field(unsigned long, stall_ms	) +	), + +	TP_fast_assign( +		__entry->dev		= dev; +		__entry->stall_ms	= stall_ms; +	), + +	TP_printk("dev 
%d,%d stall_ms %lu", +		MAJOR(__entry->dev), MINOR(__entry->dev), +		__entry->stall_ms) +); +  #endif /* _TRACE_JBD2_H */  /* This part must be outside protection */ diff --git a/include/trace/events/kmem.h b/include/trace/events/kmem.h index a9c87ad8331..aece1346ceb 100644 --- a/include/trace/events/kmem.h +++ b/include/trace/events/kmem.h @@ -6,7 +6,7 @@  #include <linux/types.h>  #include <linux/tracepoint.h> -#include "gfpflags.h" +#include <trace/events/gfpflags.h>  DECLARE_EVENT_CLASS(kmem_alloc, @@ -147,7 +147,7 @@ DEFINE_EVENT(kmem_free, kmem_cache_free,  	TP_ARGS(call_site, ptr)  ); -TRACE_EVENT(mm_page_free_direct, +TRACE_EVENT(mm_page_free,  	TP_PROTO(struct page *page, unsigned int order), @@ -169,7 +169,7 @@ TRACE_EVENT(mm_page_free_direct,  			__entry->order)  ); -TRACE_EVENT(mm_pagevec_free, +TRACE_EVENT(mm_page_free_batched,  	TP_PROTO(struct page *page, int cold), @@ -214,7 +214,7 @@ TRACE_EVENT(mm_page_alloc,  	TP_printk("page=%p pfn=%lu order=%d migratetype=%d gfp_flags=%s",  		__entry->page, -		page_to_pfn(__entry->page), +		__entry->page ? page_to_pfn(__entry->page) : 0,  		__entry->order,  		__entry->migratetype,  		show_gfp_flags(__entry->gfp_flags)) @@ -240,7 +240,7 @@ DECLARE_EVENT_CLASS(mm_page,  	TP_printk("page=%p pfn=%lu order=%u migratetype=%d percpu_refill=%d",  		__entry->page, -		page_to_pfn(__entry->page), +		__entry->page ? 
page_to_pfn(__entry->page) : 0,  		__entry->order,  		__entry->migratetype,  		__entry->order == 0) @@ -267,12 +267,12 @@ DEFINE_EVENT_PRINT(mm_page, mm_page_pcpu_drain,  TRACE_EVENT(mm_page_alloc_extfrag,  	TP_PROTO(struct page *page, -			int alloc_order, int fallback_order, -			int alloc_migratetype, int fallback_migratetype), +		int alloc_order, int fallback_order, +		int alloc_migratetype, int fallback_migratetype, int new_migratetype),  	TP_ARGS(page,  		alloc_order, fallback_order, -		alloc_migratetype, fallback_migratetype), +		alloc_migratetype, fallback_migratetype, new_migratetype),  	TP_STRUCT__entry(  		__field(	struct page *,	page			) @@ -280,6 +280,7 @@ TRACE_EVENT(mm_page_alloc_extfrag,  		__field(	int,		fallback_order		)  		__field(	int,		alloc_migratetype	)  		__field(	int,		fallback_migratetype	) +		__field(	int,		change_ownership	)  	),  	TP_fast_assign( @@ -288,6 +289,7 @@ TRACE_EVENT(mm_page_alloc_extfrag,  		__entry->fallback_order		= fallback_order;  		__entry->alloc_migratetype	= alloc_migratetype;  		__entry->fallback_migratetype	= fallback_migratetype; +		__entry->change_ownership	= (new_migratetype == alloc_migratetype);  	),  	TP_printk("page=%p pfn=%lu alloc_order=%d fallback_order=%d pageblock_order=%d alloc_migratetype=%d fallback_migratetype=%d fragmenting=%d change_ownership=%d", @@ -299,7 +301,7 @@ TRACE_EVENT(mm_page_alloc_extfrag,  		__entry->alloc_migratetype,  		__entry->fallback_migratetype,  		__entry->fallback_order < pageblock_order, -		__entry->alloc_migratetype == __entry->fallback_migratetype) +		__entry->change_ownership)  );  #endif /* _TRACE_KMEM_H */ diff --git a/include/trace/events/kvm.h b/include/trace/events/kvm.h index 6dd3a51ab1c..131a0bda7ae 100644 --- a/include/trace/events/kvm.h +++ b/include/trace/events/kvm.h @@ -6,7 +6,38 @@  #undef TRACE_SYSTEM  #define TRACE_SYSTEM kvm -#if defined(__KVM_HAVE_IOAPIC) +#define ERSN(x) { KVM_EXIT_##x, "KVM_EXIT_" #x } + +#define kvm_trace_exit_reason						\ +	
ERSN(UNKNOWN), ERSN(EXCEPTION), ERSN(IO), ERSN(HYPERCALL),	\ +	ERSN(DEBUG), ERSN(HLT), ERSN(MMIO), ERSN(IRQ_WINDOW_OPEN),	\ +	ERSN(SHUTDOWN), ERSN(FAIL_ENTRY), ERSN(INTR), ERSN(SET_TPR),	\ +	ERSN(TPR_ACCESS), ERSN(S390_SIEIC), ERSN(S390_RESET), ERSN(DCR),\ +	ERSN(NMI), ERSN(INTERNAL_ERROR), ERSN(OSI), ERSN(PAPR_HCALL),	\ +	ERSN(S390_UCONTROL), ERSN(WATCHDOG), ERSN(S390_TSCH) + +TRACE_EVENT(kvm_userspace_exit, +	    TP_PROTO(__u32 reason, int errno), +	    TP_ARGS(reason, errno), + +	TP_STRUCT__entry( +		__field(	__u32,		reason		) +		__field(	int,		errno		) +	), + +	TP_fast_assign( +		__entry->reason		= reason; +		__entry->errno		= errno; +	), + +	TP_printk("reason %s (%d)", +		  __entry->errno < 0 ? +		  (__entry->errno == -EINTR ? "restart" : "error") : +		  __print_symbolic(__entry->reason, kvm_trace_exit_reason), +		  __entry->errno < 0 ? -__entry->errno : __entry->reason) +); + +#if defined(CONFIG_HAVE_KVM_IRQCHIP)  TRACE_EVENT(kvm_set_irq,  	TP_PROTO(unsigned int gsi, int level, int irq_source_id),  	TP_ARGS(gsi, level, irq_source_id), @@ -26,7 +57,9 @@ TRACE_EVENT(kvm_set_irq,  	TP_printk("gsi %u level %d source %d",  		  __entry->gsi, __entry->level, __entry->irq_source_id)  ); +#endif +#if defined(__KVM_HAVE_IOAPIC)  #define kvm_deliver_mode		\  	{0x0, "Fixed"},			\  	{0x1, "LowPrio"},		\ @@ -89,6 +122,10 @@ TRACE_EVENT(kvm_msi_set_irq,  	{KVM_IRQCHIP_PIC_SLAVE,		"PIC slave"},		\  	{KVM_IRQCHIP_IOAPIC,		"IOAPIC"} +#endif /* defined(__KVM_HAVE_IOAPIC) */ + +#if defined(CONFIG_HAVE_KVM_IRQCHIP) +  TRACE_EVENT(kvm_ack_irq,  	TP_PROTO(unsigned int irqchip, unsigned int pin),  	TP_ARGS(irqchip, pin), @@ -103,14 +140,18 @@ TRACE_EVENT(kvm_ack_irq,  		__entry->pin		= pin;  	), +#ifdef kvm_irqchips  	TP_printk("irqchip %s pin %u",  		  __print_symbolic(__entry->irqchip, kvm_irqchips),  		 __entry->pin) +#else +	TP_printk("irqchip %d pin %u", __entry->irqchip, __entry->pin) +#endif  ); +#endif /* defined(CONFIG_HAVE_KVM_IRQCHIP) */ -#endif /* 
defined(__KVM_HAVE_IOAPIC) */  #define KVM_TRACE_MMIO_READ_UNSATISFIED 0  #define KVM_TRACE_MMIO_READ 1 @@ -185,6 +226,95 @@ TRACE_EVENT(kvm_age_page,  		  __entry->referenced ? "YOUNG" : "OLD")  ); +#ifdef CONFIG_KVM_ASYNC_PF +DECLARE_EVENT_CLASS(kvm_async_get_page_class, + +	TP_PROTO(u64 gva, u64 gfn), + +	TP_ARGS(gva, gfn), + +	TP_STRUCT__entry( +		__field(__u64, gva) +		__field(u64, gfn) +	), + +	TP_fast_assign( +		__entry->gva = gva; +		__entry->gfn = gfn; +	), + +	TP_printk("gva = %#llx, gfn = %#llx", __entry->gva, __entry->gfn) +); + +DEFINE_EVENT(kvm_async_get_page_class, kvm_try_async_get_page, + +	TP_PROTO(u64 gva, u64 gfn), + +	TP_ARGS(gva, gfn) +); + +DEFINE_EVENT(kvm_async_get_page_class, kvm_async_pf_doublefault, + +	TP_PROTO(u64 gva, u64 gfn), + +	TP_ARGS(gva, gfn) +); + +DECLARE_EVENT_CLASS(kvm_async_pf_nopresent_ready, + +	TP_PROTO(u64 token, u64 gva), + +	TP_ARGS(token, gva), + +	TP_STRUCT__entry( +		__field(__u64, token) +		__field(__u64, gva) +	), + +	TP_fast_assign( +		__entry->token = token; +		__entry->gva = gva; +	), + +	TP_printk("token %#llx gva %#llx", __entry->token, __entry->gva) + +); + +DEFINE_EVENT(kvm_async_pf_nopresent_ready, kvm_async_pf_not_present, + +	TP_PROTO(u64 token, u64 gva), + +	TP_ARGS(token, gva) +); + +DEFINE_EVENT(kvm_async_pf_nopresent_ready, kvm_async_pf_ready, + +	TP_PROTO(u64 token, u64 gva), + +	TP_ARGS(token, gva) +); + +TRACE_EVENT( +	kvm_async_pf_completed, +	TP_PROTO(unsigned long address, u64 gva), +	TP_ARGS(address, gva), + +	TP_STRUCT__entry( +		__field(unsigned long, address) +		__field(u64, gva) +		), + +	TP_fast_assign( +		__entry->address = address; +		__entry->gva = gva; +		), + +	TP_printk("gva %#llx address %#lx",  __entry->gva, +		  __entry->address) +); + +#endif +  #endif /* _TRACE_KVM_MAIN_H */  /* This part must be outside protection */ diff --git a/include/trace/events/mce.h b/include/trace/events/mce.h index 7eee77895cb..4cbbcef6baa 100644 --- a/include/trace/events/mce.h +++ 
b/include/trace/events/mce.h @@ -17,36 +17,36 @@ TRACE_EVENT(mce_record,  	TP_STRUCT__entry(  		__field(	u64,		mcgcap		)  		__field(	u64,		mcgstatus	) -		__field(	u8,		bank		)  		__field(	u64,		status		)  		__field(	u64,		addr		)  		__field(	u64,		misc		)  		__field(	u64,		ip		) -		__field(	u8,		cs		)  		__field(	u64,		tsc		)  		__field(	u64,		walltime	)  		__field(	u32,		cpu		)  		__field(	u32,		cpuid		)  		__field(	u32,		apicid		)  		__field(	u32,		socketid	) +		__field(	u8,		cs		) +		__field(	u8,		bank		)  		__field(	u8,		cpuvendor	)  	),  	TP_fast_assign(  		__entry->mcgcap		= m->mcgcap;  		__entry->mcgstatus	= m->mcgstatus; -		__entry->bank		= m->bank;  		__entry->status		= m->status;  		__entry->addr		= m->addr;  		__entry->misc		= m->misc;  		__entry->ip		= m->ip; -		__entry->cs		= m->cs;  		__entry->tsc		= m->tsc;  		__entry->walltime	= m->time;  		__entry->cpu		= m->extcpu;  		__entry->cpuid		= m->cpuid;  		__entry->apicid		= m->apicid;  		__entry->socketid	= m->socketid; +		__entry->cs		= m->cs; +		__entry->bank		= m->bank;  		__entry->cpuvendor	= m->cpuvendor;  	), diff --git a/include/trace/events/migrate.h b/include/trace/events/migrate.h new file mode 100644 index 00000000000..4e4f2f8b1ac --- /dev/null +++ b/include/trace/events/migrate.h @@ -0,0 +1,79 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM migrate + +#if !defined(_TRACE_MIGRATE_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_MIGRATE_H + +#include <linux/tracepoint.h> + +#define MIGRATE_MODE						\ +	{MIGRATE_ASYNC,		"MIGRATE_ASYNC"},		\ +	{MIGRATE_SYNC_LIGHT,	"MIGRATE_SYNC_LIGHT"},		\ +	{MIGRATE_SYNC,		"MIGRATE_SYNC"}		 + +#define MIGRATE_REASON						\ +	{MR_COMPACTION,		"compaction"},			\ +	{MR_MEMORY_FAILURE,	"memory_failure"},		\ +	{MR_MEMORY_HOTPLUG,	"memory_hotplug"},		\ +	{MR_SYSCALL,		"syscall_or_cpuset"},		\ +	{MR_MEMPOLICY_MBIND,	"mempolicy_mbind"},		\ +	{MR_CMA,		"cma"} + +TRACE_EVENT(mm_migrate_pages, + +	TP_PROTO(unsigned long succeeded, unsigned long failed, +		 enum 
migrate_mode mode, int reason), + +	TP_ARGS(succeeded, failed, mode, reason), + +	TP_STRUCT__entry( +		__field(	unsigned long,		succeeded) +		__field(	unsigned long,		failed) +		__field(	enum migrate_mode,	mode) +		__field(	int,			reason) +	), + +	TP_fast_assign( +		__entry->succeeded	= succeeded; +		__entry->failed		= failed; +		__entry->mode		= mode; +		__entry->reason		= reason; +	), + +	TP_printk("nr_succeeded=%lu nr_failed=%lu mode=%s reason=%s", +		__entry->succeeded, +		__entry->failed, +		__print_symbolic(__entry->mode, MIGRATE_MODE), +		__print_symbolic(__entry->reason, MIGRATE_REASON)) +); + +TRACE_EVENT(mm_numa_migrate_ratelimit, + +	TP_PROTO(struct task_struct *p, int dst_nid, unsigned long nr_pages), + +	TP_ARGS(p, dst_nid, nr_pages), + +	TP_STRUCT__entry( +		__array(	char,		comm,	TASK_COMM_LEN) +		__field(	pid_t,		pid) +		__field(	int,		dst_nid) +		__field(	unsigned long,	nr_pages) +	), + +	TP_fast_assign( +		memcpy(__entry->comm, p->comm, TASK_COMM_LEN); +		__entry->pid		= p->pid; +		__entry->dst_nid	= dst_nid; +		__entry->nr_pages	= nr_pages; +	), + +	TP_printk("comm=%s pid=%d dst_nid=%d nr_pages=%lu", +		__entry->comm, +		__entry->pid, +		__entry->dst_nid, +		__entry->nr_pages) +); +#endif /* _TRACE_MIGRATE_H */ + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/include/trace/events/module.h b/include/trace/events/module.h index c7bb2f0482f..7c5cbfe3fc4 100644 --- a/include/trace/events/module.h +++ b/include/trace/events/module.h @@ -1,5 +1,15 @@ +/* + * Because linux/module.h has tracepoints in the header, and ftrace.h + * used to include this file, define_trace.h includes linux/module.h + * But we do not want the module.h to override the TRACE_SYSTEM macro + * variable that define_trace.h is processing, so we only set it + * when module events are being processed, which would happen when + * CREATE_TRACE_POINTS is defined. 
+ */ +#ifdef CREATE_TRACE_POINTS  #undef TRACE_SYSTEM  #define TRACE_SYSTEM module +#endif  #if !defined(_TRACE_MODULE_H) || defined(TRACE_HEADER_MULTI_READ)  #define _TRACE_MODULE_H @@ -12,8 +22,10 @@ struct module;  #define show_module_flags(flags) __print_flags(flags, "",	\  	{ (1UL << TAINT_PROPRIETARY_MODULE),	"P" },		\ +	{ (1UL << TAINT_OOT_MODULE),		"O" },		\  	{ (1UL << TAINT_FORCED_MODULE),		"F" },		\ -	{ (1UL << TAINT_CRAP),			"C" }) +	{ (1UL << TAINT_CRAP),			"C" },		\ +	{ (1UL << TAINT_UNSIGNED_MODULE),	"E" })  TRACE_EVENT(module_load, @@ -68,7 +80,7 @@ DECLARE_EVENT_CLASS(module_refcnt,  	TP_fast_assign(  		__entry->ip	= ip; -		__entry->refcnt	= __this_cpu_read(mod->refptr->incs) + __this_cpu_read(mod->refptr->decs); +		__entry->refcnt	= __this_cpu_read(mod->refptr->incs) - __this_cpu_read(mod->refptr->decs);  		__assign_str(name, mod->name);  	), @@ -98,14 +110,14 @@ TRACE_EVENT(module_request,  	TP_ARGS(name, wait, ip),  	TP_STRUCT__entry( -		__field(	bool,		wait		)  		__field(	unsigned long,	ip		) +		__field(	bool,		wait		)  		__string(	name,		name		)  	),  	TP_fast_assign( -		__entry->wait	= wait;  		__entry->ip	= ip; +		__entry->wait	= wait;  		__assign_str(name, name);  	), @@ -119,4 +131,3 @@ TRACE_EVENT(module_request,  /* This part must be outside protection */  #include <trace/define_trace.h> - diff --git a/include/trace/events/net.h b/include/trace/events/net.h index 5f247f5ffc5..1de256b3580 100644 --- a/include/trace/events/net.h +++ b/include/trace/events/net.h @@ -6,28 +6,88 @@  #include <linux/skbuff.h>  #include <linux/netdevice.h> +#include <linux/if_vlan.h>  #include <linux/ip.h>  #include <linux/tracepoint.h> +TRACE_EVENT(net_dev_start_xmit, + +	TP_PROTO(const struct sk_buff *skb, const struct net_device *dev), + +	TP_ARGS(skb, dev), + +	TP_STRUCT__entry( +		__string(	name,			dev->name	) +		__field(	u16,			queue_mapping	) +		__field(	const void *,		skbaddr		) +		__field(	bool,			vlan_tagged	) +		__field(	u16,			vlan_proto	) +		
__field(	u16,			vlan_tci	) +		__field(	u16,			protocol	) +		__field(	u8,			ip_summed	) +		__field(	unsigned int,		len		) +		__field(	unsigned int,		data_len	) +		__field(	int,			network_offset	) +		__field(	bool,			transport_offset_valid) +		__field(	int,			transport_offset) +		__field(	u8,			tx_flags	) +		__field(	u16,			gso_size	) +		__field(	u16,			gso_segs	) +		__field(	u16,			gso_type	) +	), + +	TP_fast_assign( +		__assign_str(name, dev->name); +		__entry->queue_mapping = skb->queue_mapping; +		__entry->skbaddr = skb; +		__entry->vlan_tagged = vlan_tx_tag_present(skb); +		__entry->vlan_proto = ntohs(skb->vlan_proto); +		__entry->vlan_tci = vlan_tx_tag_get(skb); +		__entry->protocol = ntohs(skb->protocol); +		__entry->ip_summed = skb->ip_summed; +		__entry->len = skb->len; +		__entry->data_len = skb->data_len; +		__entry->network_offset = skb_network_offset(skb); +		__entry->transport_offset_valid = +			skb_transport_header_was_set(skb); +		__entry->transport_offset = skb_transport_offset(skb); +		__entry->tx_flags = skb_shinfo(skb)->tx_flags; +		__entry->gso_size = skb_shinfo(skb)->gso_size; +		__entry->gso_segs = skb_shinfo(skb)->gso_segs; +		__entry->gso_type = skb_shinfo(skb)->gso_type; +	), + +	TP_printk("dev=%s queue_mapping=%u skbaddr=%p vlan_tagged=%d vlan_proto=0x%04x vlan_tci=0x%04x protocol=0x%04x ip_summed=%d len=%u data_len=%u network_offset=%d transport_offset_valid=%d transport_offset=%d tx_flags=%d gso_size=%d gso_segs=%d gso_type=%#x", +		  __get_str(name), __entry->queue_mapping, __entry->skbaddr, +		  __entry->vlan_tagged, __entry->vlan_proto, __entry->vlan_tci, +		  __entry->protocol, __entry->ip_summed, __entry->len, +		  __entry->data_len, +		  __entry->network_offset, __entry->transport_offset_valid, +		  __entry->transport_offset, __entry->tx_flags, +		  __entry->gso_size, __entry->gso_segs, __entry->gso_type) +); +  TRACE_EVENT(net_dev_xmit,  	TP_PROTO(struct sk_buff *skb, -		 int rc), +		 int rc, +		 struct net_device *dev, +		 
unsigned int skb_len), -	TP_ARGS(skb, rc), +	TP_ARGS(skb, rc, dev, skb_len),  	TP_STRUCT__entry(  		__field(	void *,		skbaddr		)  		__field(	unsigned int,	len		)  		__field(	int,		rc		) -		__string(	name,		skb->dev->name	) +		__string(	name,		dev->name	)  	),  	TP_fast_assign(  		__entry->skbaddr = skb; -		__entry->len = skb->len; +		__entry->len = skb_len;  		__entry->rc = rc; -		__assign_str(name, skb->dev->name); +		__assign_str(name, dev->name);  	),  	TP_printk("dev=%s skbaddr=%p len=%u rc=%d", @@ -76,6 +136,106 @@ DEFINE_EVENT(net_dev_template, netif_rx,  	TP_ARGS(skb)  ); + +DECLARE_EVENT_CLASS(net_dev_rx_verbose_template, + +	TP_PROTO(const struct sk_buff *skb), + +	TP_ARGS(skb), + +	TP_STRUCT__entry( +		__string(	name,			skb->dev->name	) +		__field(	unsigned int,		napi_id		) +		__field(	u16,			queue_mapping	) +		__field(	const void *,		skbaddr		) +		__field(	bool,			vlan_tagged	) +		__field(	u16,			vlan_proto	) +		__field(	u16,			vlan_tci	) +		__field(	u16,			protocol	) +		__field(	u8,			ip_summed	) +		__field(	u32,			hash		) +		__field(	bool,			l4_hash		) +		__field(	unsigned int,		len		) +		__field(	unsigned int,		data_len	) +		__field(	unsigned int,		truesize	) +		__field(	bool,			mac_header_valid) +		__field(	int,			mac_header	) +		__field(	unsigned char,		nr_frags	) +		__field(	u16,			gso_size	) +		__field(	u16,			gso_type	) +	), + +	TP_fast_assign( +		__assign_str(name, skb->dev->name); +#ifdef CONFIG_NET_RX_BUSY_POLL +		__entry->napi_id = skb->napi_id; +#else +		__entry->napi_id = 0; +#endif +		__entry->queue_mapping = skb->queue_mapping; +		__entry->skbaddr = skb; +		__entry->vlan_tagged = vlan_tx_tag_present(skb); +		__entry->vlan_proto = ntohs(skb->vlan_proto); +		__entry->vlan_tci = vlan_tx_tag_get(skb); +		__entry->protocol = ntohs(skb->protocol); +		__entry->ip_summed = skb->ip_summed; +		__entry->hash = skb->hash; +		__entry->l4_hash = skb->l4_hash; +		__entry->len = skb->len; +		__entry->data_len = skb->data_len; +		__entry->truesize = 
skb->truesize; +		__entry->mac_header_valid = skb_mac_header_was_set(skb); +		__entry->mac_header = skb_mac_header(skb) - skb->data; +		__entry->nr_frags = skb_shinfo(skb)->nr_frags; +		__entry->gso_size = skb_shinfo(skb)->gso_size; +		__entry->gso_type = skb_shinfo(skb)->gso_type; +	), + +	TP_printk("dev=%s napi_id=%#x queue_mapping=%u skbaddr=%p vlan_tagged=%d vlan_proto=0x%04x vlan_tci=0x%04x protocol=0x%04x ip_summed=%d hash=0x%08x l4_hash=%d len=%u data_len=%u truesize=%u mac_header_valid=%d mac_header=%d nr_frags=%d gso_size=%d gso_type=%#x", +		  __get_str(name), __entry->napi_id, __entry->queue_mapping, +		  __entry->skbaddr, __entry->vlan_tagged, __entry->vlan_proto, +		  __entry->vlan_tci, __entry->protocol, __entry->ip_summed, +		  __entry->hash, __entry->l4_hash, __entry->len, +		  __entry->data_len, __entry->truesize, +		  __entry->mac_header_valid, __entry->mac_header, +		  __entry->nr_frags, __entry->gso_size, __entry->gso_type) +); + +DEFINE_EVENT(net_dev_rx_verbose_template, napi_gro_frags_entry, + +	TP_PROTO(const struct sk_buff *skb), + +	TP_ARGS(skb) +); + +DEFINE_EVENT(net_dev_rx_verbose_template, napi_gro_receive_entry, + +	TP_PROTO(const struct sk_buff *skb), + +	TP_ARGS(skb) +); + +DEFINE_EVENT(net_dev_rx_verbose_template, netif_receive_skb_entry, + +	TP_PROTO(const struct sk_buff *skb), + +	TP_ARGS(skb) +); + +DEFINE_EVENT(net_dev_rx_verbose_template, netif_rx_entry, + +	TP_PROTO(const struct sk_buff *skb), + +	TP_ARGS(skb) +); + +DEFINE_EVENT(net_dev_rx_verbose_template, netif_rx_ni_entry, + +	TP_PROTO(const struct sk_buff *skb), + +	TP_ARGS(skb) +); +  #endif /* _TRACE_NET_H */  /* This part must be outside protection */ diff --git a/include/trace/events/nmi.h b/include/trace/events/nmi.h new file mode 100644 index 00000000000..da3ee96b8d0 --- /dev/null +++ b/include/trace/events/nmi.h @@ -0,0 +1,37 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM nmi + +#if !defined(_TRACE_NMI_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_NMI_H + 
+#include <linux/ktime.h> +#include <linux/tracepoint.h> + +TRACE_EVENT(nmi_handler, + +	TP_PROTO(void *handler, s64 delta_ns, int handled), + +	TP_ARGS(handler, delta_ns, handled), + +	TP_STRUCT__entry( +		__field(	void *,		handler	) +		__field(	s64,		delta_ns) +		__field(	int,		handled	) +	), + +	TP_fast_assign( +		__entry->handler = handler; +		__entry->delta_ns = delta_ns; +		__entry->handled = handled; +	), + +	TP_printk("%ps() delta_ns: %lld handled: %d", +		__entry->handler, +		__entry->delta_ns, +		__entry->handled) +); + +#endif /* _TRACE_NMI_H */ + +/* This part ust be outside protection */ +#include <trace/define_trace.h> diff --git a/include/trace/events/oom.h b/include/trace/events/oom.h new file mode 100644 index 00000000000..1e974983757 --- /dev/null +++ b/include/trace/events/oom.h @@ -0,0 +1,33 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM oom + +#if !defined(_TRACE_OOM_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_OOM_H +#include <linux/tracepoint.h> + +TRACE_EVENT(oom_score_adj_update, + +	TP_PROTO(struct task_struct *task), + +	TP_ARGS(task), + +	TP_STRUCT__entry( +		__field(	pid_t,	pid) +		__array(	char,	comm,	TASK_COMM_LEN ) +		__field(	short,	oom_score_adj) +	), + +	TP_fast_assign( +		__entry->pid = task->pid; +		memcpy(__entry->comm, task->comm, TASK_COMM_LEN); +		__entry->oom_score_adj = task->signal->oom_score_adj; +	), + +	TP_printk("pid=%d comm=%s oom_score_adj=%hd", +		__entry->pid, __entry->comm, __entry->oom_score_adj) +); + +#endif + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/include/trace/events/pagemap.h b/include/trace/events/pagemap.h new file mode 100644 index 00000000000..1c9fabde69e --- /dev/null +++ b/include/trace/events/pagemap.h @@ -0,0 +1,89 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM pagemap + +#if !defined(_TRACE_PAGEMAP_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_PAGEMAP_H + +#include <linux/tracepoint.h> +#include <linux/mm.h> + +#define	
PAGEMAP_MAPPED		0x0001u +#define PAGEMAP_ANONYMOUS	0x0002u +#define PAGEMAP_FILE		0x0004u +#define PAGEMAP_SWAPCACHE	0x0008u +#define PAGEMAP_SWAPBACKED	0x0010u +#define PAGEMAP_MAPPEDDISK	0x0020u +#define PAGEMAP_BUFFERS		0x0040u + +#define trace_pagemap_flags(page) ( \ +	(PageAnon(page)		? PAGEMAP_ANONYMOUS  : PAGEMAP_FILE) | \ +	(page_mapped(page)	? PAGEMAP_MAPPED     : 0) | \ +	(PageSwapCache(page)	? PAGEMAP_SWAPCACHE  : 0) | \ +	(PageSwapBacked(page)	? PAGEMAP_SWAPBACKED : 0) | \ +	(PageMappedToDisk(page)	? PAGEMAP_MAPPEDDISK : 0) | \ +	(page_has_private(page) ? PAGEMAP_BUFFERS    : 0) \ +	) + +TRACE_EVENT(mm_lru_insertion, + +	TP_PROTO( +		struct page *page, +		unsigned long pfn, +		int lru, +		unsigned long flags +	), + +	TP_ARGS(page, pfn, lru, flags), + +	TP_STRUCT__entry( +		__field(struct page *,	page	) +		__field(unsigned long,	pfn	) +		__field(int,		lru	) +		__field(unsigned long,	flags	) +	), + +	TP_fast_assign( +		__entry->page	= page; +		__entry->pfn	= pfn; +		__entry->lru	= lru; +		__entry->flags	= flags; +	), + +	/* Flag format is based on page-types.c formatting for pagemap */ +	TP_printk("page=%p pfn=%lu lru=%d flags=%s%s%s%s%s%s", +			__entry->page, +			__entry->pfn, +			__entry->lru, +			__entry->flags & PAGEMAP_MAPPED		? "M" : " ", +			__entry->flags & PAGEMAP_ANONYMOUS	? "a" : "f", +			__entry->flags & PAGEMAP_SWAPCACHE	? "s" : " ", +			__entry->flags & PAGEMAP_SWAPBACKED	? "b" : " ", +			__entry->flags & PAGEMAP_MAPPEDDISK	? "d" : " ", +			__entry->flags & PAGEMAP_BUFFERS	? 
"B" : " ") +); + +TRACE_EVENT(mm_lru_activate, + +	TP_PROTO(struct page *page, unsigned long pfn), + +	TP_ARGS(page, pfn), + +	TP_STRUCT__entry( +		__field(struct page *,	page	) +		__field(unsigned long,	pfn	) +	), + +	TP_fast_assign( +		__entry->page	= page; +		__entry->pfn	= pfn; +	), + +	/* Flag format is based on page-types.c formatting for pagemap */ +	TP_printk("page=%p pfn=%lu", __entry->page, __entry->pfn) + +); + +#endif /* _TRACE_PAGEMAP_H */ + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/include/trace/events/power.h b/include/trace/events/power.h index 286784d69b8..d19840b0cac 100644 --- a/include/trace/events/power.h +++ b/include/trace/events/power.h @@ -5,74 +5,216 @@  #define _TRACE_POWER_H  #include <linux/ktime.h> +#include <linux/pm_qos.h>  #include <linux/tracepoint.h> +#include <linux/ftrace_event.h> -#ifndef _TRACE_POWER_ENUM_ -#define _TRACE_POWER_ENUM_ -enum { -	POWER_NONE	= 0, -	POWER_CSTATE	= 1,	/* C-State */ -	POWER_PSTATE	= 2,	/* Fequency change or DVFS */ -	POWER_SSTATE	= 3,	/* Suspend */ -}; -#endif +#define TPS(x)  tracepoint_string(x) -/* - * The power events are used for cpuidle & suspend (power_start, power_end) - *  and for cpufreq (power_frequency) - */ -DECLARE_EVENT_CLASS(power, +DECLARE_EVENT_CLASS(cpu, -	TP_PROTO(unsigned int type, unsigned int state, unsigned int cpu_id), +	TP_PROTO(unsigned int state, unsigned int cpu_id), -	TP_ARGS(type, state, cpu_id), +	TP_ARGS(state, cpu_id),  	TP_STRUCT__entry( -		__field(	u64,		type		) -		__field(	u64,		state		) -		__field(	u64,		cpu_id		) +		__field(	u32,		state		) +		__field(	u32,		cpu_id		)  	),  	TP_fast_assign( -		__entry->type = type;  		__entry->state = state;  		__entry->cpu_id = cpu_id;  	), -	TP_printk("type=%lu state=%lu cpu_id=%lu", (unsigned long)__entry->type, -		(unsigned long)__entry->state, (unsigned long)__entry->cpu_id) +	TP_printk("state=%lu cpu_id=%lu", (unsigned long)__entry->state, +		  (unsigned long)__entry->cpu_id)  
); -DEFINE_EVENT(power, power_start, +DEFINE_EVENT(cpu, cpu_idle, -	TP_PROTO(unsigned int type, unsigned int state, unsigned int cpu_id), +	TP_PROTO(unsigned int state, unsigned int cpu_id), -	TP_ARGS(type, state, cpu_id) +	TP_ARGS(state, cpu_id)  ); -DEFINE_EVENT(power, power_frequency, +TRACE_EVENT(pstate_sample, -	TP_PROTO(unsigned int type, unsigned int state, unsigned int cpu_id), +	TP_PROTO(u32 core_busy, +		u32 scaled_busy, +		u32 state, +		u64 mperf, +		u64 aperf, +		u32 freq +		), + +	TP_ARGS(core_busy, +		scaled_busy, +		state, +		mperf, +		aperf, +		freq +		), + +	TP_STRUCT__entry( +		__field(u32, core_busy) +		__field(u32, scaled_busy) +		__field(u32, state) +		__field(u64, mperf) +		__field(u64, aperf) +		__field(u32, freq) + +	), + +	TP_fast_assign( +		__entry->core_busy = core_busy; +		__entry->scaled_busy = scaled_busy; +		__entry->state = state; +		__entry->mperf = mperf; +		__entry->aperf = aperf; +		__entry->freq = freq; +		), + +	TP_printk("core_busy=%lu scaled=%lu state=%lu mperf=%llu aperf=%llu freq=%lu ", +		(unsigned long)__entry->core_busy, +		(unsigned long)__entry->scaled_busy, +		(unsigned long)__entry->state, +		(unsigned long long)__entry->mperf, +		(unsigned long long)__entry->aperf, +		(unsigned long)__entry->freq +		) -	TP_ARGS(type, state, cpu_id)  ); -TRACE_EVENT(power_end, +/* This file can get included multiple times, TRACE_HEADER_MULTI_READ at top */ +#ifndef _PWR_EVENT_AVOID_DOUBLE_DEFINING +#define _PWR_EVENT_AVOID_DOUBLE_DEFINING + +#define PWR_EVENT_EXIT -1 +#endif -	TP_PROTO(unsigned int cpu_id), +#define pm_verb_symbolic(event) \ +	__print_symbolic(event, \ +		{ PM_EVENT_SUSPEND, "suspend" }, \ +		{ PM_EVENT_RESUME, "resume" }, \ +		{ PM_EVENT_FREEZE, "freeze" }, \ +		{ PM_EVENT_QUIESCE, "quiesce" }, \ +		{ PM_EVENT_HIBERNATE, "hibernate" }, \ +		{ PM_EVENT_THAW, "thaw" }, \ +		{ PM_EVENT_RESTORE, "restore" }, \ +		{ PM_EVENT_RECOVER, "recover" }) + +DEFINE_EVENT(cpu, cpu_frequency, + +	TP_PROTO(unsigned int frequency, 
unsigned int cpu_id), + +	TP_ARGS(frequency, cpu_id) +); -	TP_ARGS(cpu_id), +TRACE_EVENT(device_pm_callback_start, + +	TP_PROTO(struct device *dev, const char *pm_ops, int event), + +	TP_ARGS(dev, pm_ops, event),  	TP_STRUCT__entry( -		__field(	u64,		cpu_id		) +		__string(device, dev_name(dev)) +		__string(driver, dev_driver_string(dev)) +		__string(parent, dev->parent ? dev_name(dev->parent) : "none") +		__string(pm_ops, pm_ops ? pm_ops : "none ") +		__field(int, event)  	),  	TP_fast_assign( -		__entry->cpu_id = cpu_id; +		__assign_str(device, dev_name(dev)); +		__assign_str(driver, dev_driver_string(dev)); +		__assign_str(parent, +			dev->parent ? dev_name(dev->parent) : "none"); +		__assign_str(pm_ops, pm_ops ? pm_ops : "none "); +		__entry->event = event; +	), + +	TP_printk("%s %s, parent: %s, %s[%s]", __get_str(driver), +		__get_str(device), __get_str(parent), __get_str(pm_ops), +		pm_verb_symbolic(__entry->event)) +); + +TRACE_EVENT(device_pm_callback_end, + +	TP_PROTO(struct device *dev, int error), + +	TP_ARGS(dev, error), + +	TP_STRUCT__entry( +		__string(device, dev_name(dev)) +		__string(driver, dev_driver_string(dev)) +		__field(int, error)  	), -	TP_printk("cpu_id=%lu", (unsigned long)__entry->cpu_id) +	TP_fast_assign( +		__assign_str(device, dev_name(dev)); +		__assign_str(driver, dev_driver_string(dev)); +		__entry->error = error; +	), +	TP_printk("%s %s, err=%d", +		__get_str(driver), __get_str(device), __entry->error) +); + +TRACE_EVENT(suspend_resume, + +	TP_PROTO(const char *action, int val, bool start), + +	TP_ARGS(action, val, start), + +	TP_STRUCT__entry( +		__field(const char *, action) +		__field(int, val) +		__field(bool, start) +	), + +	TP_fast_assign( +		__entry->action = action; +		__entry->val = val; +		__entry->start = start; +	), + +	TP_printk("%s[%u] %s", __entry->action, (unsigned int)__entry->val, +		(__entry->start)?"begin":"end") +); + +DECLARE_EVENT_CLASS(wakeup_source, + +	TP_PROTO(const char *name, unsigned int state), + +	
TP_ARGS(name, state), + +	TP_STRUCT__entry( +		__string(       name,           name            ) +		__field(        u64,            state           ) +	), + +	TP_fast_assign( +		__assign_str(name, name); +		__entry->state = state; +	), + +	TP_printk("%s state=0x%lx", __get_str(name), +		(unsigned long)__entry->state) +); + +DEFINE_EVENT(wakeup_source, wakeup_source_activate, + +	TP_PROTO(const char *name, unsigned int state), + +	TP_ARGS(name, state) +); + +DEFINE_EVENT(wakeup_source, wakeup_source_deactivate, + +	TP_PROTO(const char *name, unsigned int state), + +	TP_ARGS(name, state)  );  /* @@ -154,6 +296,177 @@ DEFINE_EVENT(power_domain, power_domain_target,  	TP_ARGS(name, state, cpu_id)  ); +/* + * The pm qos events are used for pm qos update + */ +DECLARE_EVENT_CLASS(pm_qos_request, + +	TP_PROTO(int pm_qos_class, s32 value), + +	TP_ARGS(pm_qos_class, value), + +	TP_STRUCT__entry( +		__field( int,                    pm_qos_class   ) +		__field( s32,                    value          ) +	), + +	TP_fast_assign( +		__entry->pm_qos_class = pm_qos_class; +		__entry->value = value; +	), + +	TP_printk("pm_qos_class=%s value=%d", +		  __print_symbolic(__entry->pm_qos_class, +			{ PM_QOS_CPU_DMA_LATENCY,	"CPU_DMA_LATENCY" }, +			{ PM_QOS_NETWORK_LATENCY,	"NETWORK_LATENCY" }, +			{ PM_QOS_NETWORK_THROUGHPUT,	"NETWORK_THROUGHPUT" }), +		  __entry->value) +); + +DEFINE_EVENT(pm_qos_request, pm_qos_add_request, + +	TP_PROTO(int pm_qos_class, s32 value), + +	TP_ARGS(pm_qos_class, value) +); + +DEFINE_EVENT(pm_qos_request, pm_qos_update_request, + +	TP_PROTO(int pm_qos_class, s32 value), + +	TP_ARGS(pm_qos_class, value) +); + +DEFINE_EVENT(pm_qos_request, pm_qos_remove_request, + +	TP_PROTO(int pm_qos_class, s32 value), + +	TP_ARGS(pm_qos_class, value) +); + +TRACE_EVENT(pm_qos_update_request_timeout, + +	TP_PROTO(int pm_qos_class, s32 value, unsigned long timeout_us), + +	TP_ARGS(pm_qos_class, value, timeout_us), + +	TP_STRUCT__entry( +		__field( int,                    
pm_qos_class   ) +		__field( s32,                    value          ) +		__field( unsigned long,          timeout_us     ) +	), + +	TP_fast_assign( +		__entry->pm_qos_class = pm_qos_class; +		__entry->value = value; +		__entry->timeout_us = timeout_us; +	), + +	TP_printk("pm_qos_class=%s value=%d, timeout_us=%ld", +		  __print_symbolic(__entry->pm_qos_class, +			{ PM_QOS_CPU_DMA_LATENCY,	"CPU_DMA_LATENCY" }, +			{ PM_QOS_NETWORK_LATENCY,	"NETWORK_LATENCY" }, +			{ PM_QOS_NETWORK_THROUGHPUT,	"NETWORK_THROUGHPUT" }), +		  __entry->value, __entry->timeout_us) +); + +DECLARE_EVENT_CLASS(pm_qos_update, + +	TP_PROTO(enum pm_qos_req_action action, int prev_value, int curr_value), + +	TP_ARGS(action, prev_value, curr_value), + +	TP_STRUCT__entry( +		__field( enum pm_qos_req_action, action         ) +		__field( int,                    prev_value     ) +		__field( int,                    curr_value     ) +	), + +	TP_fast_assign( +		__entry->action = action; +		__entry->prev_value = prev_value; +		__entry->curr_value = curr_value; +	), + +	TP_printk("action=%s prev_value=%d curr_value=%d", +		  __print_symbolic(__entry->action, +			{ PM_QOS_ADD_REQ,	"ADD_REQ" }, +			{ PM_QOS_UPDATE_REQ,	"UPDATE_REQ" }, +			{ PM_QOS_REMOVE_REQ,	"REMOVE_REQ" }), +		  __entry->prev_value, __entry->curr_value) +); + +DEFINE_EVENT(pm_qos_update, pm_qos_update_target, + +	TP_PROTO(enum pm_qos_req_action action, int prev_value, int curr_value), + +	TP_ARGS(action, prev_value, curr_value) +); + +DEFINE_EVENT_PRINT(pm_qos_update, pm_qos_update_flags, + +	TP_PROTO(enum pm_qos_req_action action, int prev_value, int curr_value), + +	TP_ARGS(action, prev_value, curr_value), + +	TP_printk("action=%s prev_value=0x%x curr_value=0x%x", +		  __print_symbolic(__entry->action, +			{ PM_QOS_ADD_REQ,	"ADD_REQ" }, +			{ PM_QOS_UPDATE_REQ,	"UPDATE_REQ" }, +			{ PM_QOS_REMOVE_REQ,	"REMOVE_REQ" }), +		  __entry->prev_value, __entry->curr_value) +); + +DECLARE_EVENT_CLASS(dev_pm_qos_request, + +	TP_PROTO(const char 
*name, enum dev_pm_qos_req_type type, +		 s32 new_value), + +	TP_ARGS(name, type, new_value), + +	TP_STRUCT__entry( +		__string( name,                    name         ) +		__field( enum dev_pm_qos_req_type, type         ) +		__field( s32,                      new_value    ) +	), + +	TP_fast_assign( +		__assign_str(name, name); +		__entry->type = type; +		__entry->new_value = new_value; +	), + +	TP_printk("device=%s type=%s new_value=%d", +		  __get_str(name), +		  __print_symbolic(__entry->type, +			{ DEV_PM_QOS_RESUME_LATENCY, "DEV_PM_QOS_RESUME_LATENCY" }, +			{ DEV_PM_QOS_FLAGS, "DEV_PM_QOS_FLAGS" }), +		  __entry->new_value) +); + +DEFINE_EVENT(dev_pm_qos_request, dev_pm_qos_add_request, + +	TP_PROTO(const char *name, enum dev_pm_qos_req_type type, +		 s32 new_value), + +	TP_ARGS(name, type, new_value) +); + +DEFINE_EVENT(dev_pm_qos_request, dev_pm_qos_update_request, + +	TP_PROTO(const char *name, enum dev_pm_qos_req_type type, +		 s32 new_value), + +	TP_ARGS(name, type, new_value) +); + +DEFINE_EVENT(dev_pm_qos_request, dev_pm_qos_remove_request, + +	TP_PROTO(const char *name, enum dev_pm_qos_req_type type, +		 s32 new_value), + +	TP_ARGS(name, type, new_value) +);  #endif /* _TRACE_POWER_H */  /* This part must be outside protection */ diff --git a/include/trace/events/power_cpu_migrate.h b/include/trace/events/power_cpu_migrate.h new file mode 100644 index 00000000000..f76dd4de625 --- /dev/null +++ b/include/trace/events/power_cpu_migrate.h @@ -0,0 +1,67 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM power + +#if !defined(_TRACE_POWER_CPU_MIGRATE_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_POWER_CPU_MIGRATE_H + +#include <linux/tracepoint.h> + +#define __cpu_migrate_proto			\ +	TP_PROTO(u64 timestamp,			\ +		 u32 cpu_hwid) +#define __cpu_migrate_args			\ +	TP_ARGS(timestamp,			\ +		cpu_hwid) + +DECLARE_EVENT_CLASS(cpu_migrate, + +	__cpu_migrate_proto, +	__cpu_migrate_args, + +	TP_STRUCT__entry( +		__field(u64,	timestamp		) +		__field(u32,	cpu_hwid	
	) +	), + +	TP_fast_assign( +		__entry->timestamp = timestamp; +		__entry->cpu_hwid = cpu_hwid; +	), + +	TP_printk("timestamp=%llu cpu_hwid=0x%08lX", +		(unsigned long long)__entry->timestamp, +		(unsigned long)__entry->cpu_hwid +	) +); + +#define __define_cpu_migrate_event(name)		\ +	DEFINE_EVENT(cpu_migrate, cpu_migrate_##name,	\ +		__cpu_migrate_proto,			\ +		__cpu_migrate_args			\ +	) + +__define_cpu_migrate_event(begin); +__define_cpu_migrate_event(finish); +__define_cpu_migrate_event(current); + +#undef __define_cpu_migrate +#undef __cpu_migrate_proto +#undef __cpu_migrate_args + +/* This file can get included multiple times, TRACE_HEADER_MULTI_READ at top */ +#ifndef _PWR_CPU_MIGRATE_EVENT_AVOID_DOUBLE_DEFINING +#define _PWR_CPU_MIGRATE_EVENT_AVOID_DOUBLE_DEFINING + +/* + * Set from_phys_cpu and to_phys_cpu to CPU_MIGRATE_ALL_CPUS to indicate + * a whole-cluster migration: + */ +#define CPU_MIGRATE_ALL_CPUS 0x80000000U +#endif + +#endif /* _TRACE_POWER_CPU_MIGRATE_H */ + +/* This part must be outside protection */ +#undef TRACE_INCLUDE_FILE +#define TRACE_INCLUDE_FILE power_cpu_migrate +#include <trace/define_trace.h> diff --git a/include/trace/events/printk.h b/include/trace/events/printk.h new file mode 100644 index 00000000000..c008bc99f9f --- /dev/null +++ b/include/trace/events/printk.h @@ -0,0 +1,28 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM printk + +#if !defined(_TRACE_PRINTK_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_PRINTK_H + +#include <linux/tracepoint.h> + +TRACE_EVENT(console, +	TP_PROTO(const char *text, size_t len), + +	TP_ARGS(text, len), + +	TP_STRUCT__entry( +		__dynamic_array(char, msg, len + 1) +	), + +	TP_fast_assign( +		memcpy(__get_dynamic_array(msg), text, len); +		((char *)__get_dynamic_array(msg))[len] = 0; +	), + +	TP_printk("%s", __get_str(msg)) +); +#endif /* _TRACE_PRINTK_H */ + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/include/trace/events/random.h 
b/include/trace/events/random.h new file mode 100644 index 00000000000..805af6db41c --- /dev/null +++ b/include/trace/events/random.h @@ -0,0 +1,315 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM random + +#if !defined(_TRACE_RANDOM_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_RANDOM_H + +#include <linux/writeback.h> +#include <linux/tracepoint.h> + +TRACE_EVENT(add_device_randomness, +	TP_PROTO(int bytes, unsigned long IP), + +	TP_ARGS(bytes, IP), + +	TP_STRUCT__entry( +		__field(	  int,	bytes			) +		__field(unsigned long,	IP			) +	), + +	TP_fast_assign( +		__entry->bytes		= bytes; +		__entry->IP		= IP; +	), + +	TP_printk("bytes %d caller %pF", +		__entry->bytes, (void *)__entry->IP) +); + +DECLARE_EVENT_CLASS(random__mix_pool_bytes, +	TP_PROTO(const char *pool_name, int bytes, unsigned long IP), + +	TP_ARGS(pool_name, bytes, IP), + +	TP_STRUCT__entry( +		__field( const char *,	pool_name		) +		__field(	  int,	bytes			) +		__field(unsigned long,	IP			) +	), + +	TP_fast_assign( +		__entry->pool_name	= pool_name; +		__entry->bytes		= bytes; +		__entry->IP		= IP; +	), + +	TP_printk("%s pool: bytes %d caller %pF", +		  __entry->pool_name, __entry->bytes, (void *)__entry->IP) +); + +DEFINE_EVENT(random__mix_pool_bytes, mix_pool_bytes, +	TP_PROTO(const char *pool_name, int bytes, unsigned long IP), + +	TP_ARGS(pool_name, bytes, IP) +); + +DEFINE_EVENT(random__mix_pool_bytes, mix_pool_bytes_nolock, +	TP_PROTO(const char *pool_name, int bytes, unsigned long IP), + +	TP_ARGS(pool_name, bytes, IP) +); + +TRACE_EVENT(credit_entropy_bits, +	TP_PROTO(const char *pool_name, int bits, int entropy_count, +		 int entropy_total, unsigned long IP), + +	TP_ARGS(pool_name, bits, entropy_count, entropy_total, IP), + +	TP_STRUCT__entry( +		__field( const char *,	pool_name		) +		__field(	  int,	bits			) +		__field(	  int,	entropy_count		) +		__field(	  int,	entropy_total		) +		__field(unsigned long,	IP			) +	), + +	TP_fast_assign( +		__entry->pool_name	= pool_name; +		
__entry->bits		= bits; +		__entry->entropy_count	= entropy_count; +		__entry->entropy_total	= entropy_total; +		__entry->IP		= IP; +	), + +	TP_printk("%s pool: bits %d entropy_count %d entropy_total %d " +		  "caller %pF", __entry->pool_name, __entry->bits, +		  __entry->entropy_count, __entry->entropy_total, +		  (void *)__entry->IP) +); + +TRACE_EVENT(push_to_pool, +	TP_PROTO(const char *pool_name, int pool_bits, int input_bits), + +	TP_ARGS(pool_name, pool_bits, input_bits), + +	TP_STRUCT__entry( +		__field( const char *,	pool_name		) +		__field(	  int,	pool_bits		) +		__field(	  int,	input_bits		) +	), + +	TP_fast_assign( +		__entry->pool_name	= pool_name; +		__entry->pool_bits	= pool_bits; +		__entry->input_bits	= input_bits; +	), + +	TP_printk("%s: pool_bits %d input_pool_bits %d", +		  __entry->pool_name, __entry->pool_bits, +		  __entry->input_bits) +); + +TRACE_EVENT(debit_entropy, +	TP_PROTO(const char *pool_name, int debit_bits), + +	TP_ARGS(pool_name, debit_bits), + +	TP_STRUCT__entry( +		__field( const char *,	pool_name		) +		__field(	  int,	debit_bits		) +	), + +	TP_fast_assign( +		__entry->pool_name	= pool_name; +		__entry->debit_bits	= debit_bits; +	), + +	TP_printk("%s: debit_bits %d", __entry->pool_name, +		  __entry->debit_bits) +); + +TRACE_EVENT(add_input_randomness, +	TP_PROTO(int input_bits), + +	TP_ARGS(input_bits), + +	TP_STRUCT__entry( +		__field(	  int,	input_bits		) +	), + +	TP_fast_assign( +		__entry->input_bits	= input_bits; +	), + +	TP_printk("input_pool_bits %d", __entry->input_bits) +); + +TRACE_EVENT(add_disk_randomness, +	TP_PROTO(dev_t dev, int input_bits), + +	TP_ARGS(dev, input_bits), + +	TP_STRUCT__entry( +		__field(	dev_t,	dev			) +		__field(	  int,	input_bits		) +	), + +	TP_fast_assign( +		__entry->dev		= dev; +		__entry->input_bits	= input_bits; +	), + +	TP_printk("dev %d,%d input_pool_bits %d", MAJOR(__entry->dev), +		  MINOR(__entry->dev), __entry->input_bits) +); + +TRACE_EVENT(xfer_secondary_pool, +	TP_PROTO(const char 
*pool_name, int xfer_bits, int request_bits, +		 int pool_entropy, int input_entropy), + +	TP_ARGS(pool_name, xfer_bits, request_bits, pool_entropy, +		input_entropy), + +	TP_STRUCT__entry( +		__field( const char *,	pool_name		) +		__field(	  int,	xfer_bits		) +		__field(	  int,	request_bits		) +		__field(	  int,	pool_entropy		) +		__field(	  int,	input_entropy		) +	), + +	TP_fast_assign( +		__entry->pool_name	= pool_name; +		__entry->xfer_bits	= xfer_bits; +		__entry->request_bits	= request_bits; +		__entry->pool_entropy	= pool_entropy; +		__entry->input_entropy	= input_entropy; +	), + +	TP_printk("pool %s xfer_bits %d request_bits %d pool_entropy %d " +		  "input_entropy %d", __entry->pool_name, __entry->xfer_bits, +		  __entry->request_bits, __entry->pool_entropy, +		  __entry->input_entropy) +); + +DECLARE_EVENT_CLASS(random__get_random_bytes, +	TP_PROTO(int nbytes, unsigned long IP), + +	TP_ARGS(nbytes, IP), + +	TP_STRUCT__entry( +		__field(	  int,	nbytes			) +		__field(unsigned long,	IP			) +	), + +	TP_fast_assign( +		__entry->nbytes		= nbytes; +		__entry->IP		= IP; +	), + +	TP_printk("nbytes %d caller %pF", __entry->nbytes, (void *)__entry->IP) +); + +DEFINE_EVENT(random__get_random_bytes, get_random_bytes, +	TP_PROTO(int nbytes, unsigned long IP), + +	TP_ARGS(nbytes, IP) +); + +DEFINE_EVENT(random__get_random_bytes, get_random_bytes_arch, +	TP_PROTO(int nbytes, unsigned long IP), + +	TP_ARGS(nbytes, IP) +); + +DECLARE_EVENT_CLASS(random__extract_entropy, +	TP_PROTO(const char *pool_name, int nbytes, int entropy_count, +		 unsigned long IP), + +	TP_ARGS(pool_name, nbytes, entropy_count, IP), + +	TP_STRUCT__entry( +		__field( const char *,	pool_name		) +		__field(	  int,	nbytes			) +		__field(	  int,	entropy_count		) +		__field(unsigned long,	IP			) +	), + +	TP_fast_assign( +		__entry->pool_name	= pool_name; +		__entry->nbytes		= nbytes; +		__entry->entropy_count	= entropy_count; +		__entry->IP		= IP; +	), + +	TP_printk("%s pool: nbytes %d entropy_count %d 
caller %pF", +		  __entry->pool_name, __entry->nbytes, __entry->entropy_count, +		  (void *)__entry->IP) +); + + +DEFINE_EVENT(random__extract_entropy, extract_entropy, +	TP_PROTO(const char *pool_name, int nbytes, int entropy_count, +		 unsigned long IP), + +	TP_ARGS(pool_name, nbytes, entropy_count, IP) +); + +DEFINE_EVENT(random__extract_entropy, extract_entropy_user, +	TP_PROTO(const char *pool_name, int nbytes, int entropy_count, +		 unsigned long IP), + +	TP_ARGS(pool_name, nbytes, entropy_count, IP) +); + +TRACE_EVENT(random_read, +	TP_PROTO(int got_bits, int need_bits, int pool_left, int input_left), + +	TP_ARGS(got_bits, need_bits, pool_left, input_left), + +	TP_STRUCT__entry( +		__field(	  int,	got_bits		) +		__field(	  int,	need_bits		) +		__field(	  int,	pool_left		) +		__field(	  int,	input_left		) +	), + +	TP_fast_assign( +		__entry->got_bits	= got_bits; +		__entry->need_bits	= need_bits; +		__entry->pool_left	= pool_left; +		__entry->input_left	= input_left; +	), + +	TP_printk("got_bits %d still_needed_bits %d " +		  "blocking_pool_entropy_left %d input_entropy_left %d", +		  __entry->got_bits, __entry->got_bits, __entry->pool_left, +		  __entry->input_left) +); + +TRACE_EVENT(urandom_read, +	TP_PROTO(int got_bits, int pool_left, int input_left), + +	TP_ARGS(got_bits, pool_left, input_left), + +	TP_STRUCT__entry( +		__field(	  int,	got_bits		) +		__field(	  int,	pool_left		) +		__field(	  int,	input_left		) +	), + +	TP_fast_assign( +		__entry->got_bits	= got_bits; +		__entry->pool_left	= pool_left; +		__entry->input_left	= input_left; +	), + +	TP_printk("got_bits %d nonblocking_pool_entropy_left %d " +		  "input_entropy_left %d", __entry->got_bits, +		  __entry->pool_left, __entry->input_left) +); + +#endif /* _TRACE_RANDOM_H */ + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/include/trace/events/ras.h b/include/trace/events/ras.h new file mode 100644 index 00000000000..1c875ad1ee5 --- /dev/null +++ 
b/include/trace/events/ras.h @@ -0,0 +1,77 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM ras + +#if !defined(_TRACE_AER_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_AER_H + +#include <linux/tracepoint.h> +#include <linux/aer.h> + + +/* + * PCIe AER Trace event + * + * These events are generated when hardware detects a corrected or + * uncorrected event on a PCIe device. The event report has + * the following structure: + * + * char * dev_name -	The name of the slot where the device resides + *			([domain:]bus:device.function). + * u32 status -		Either the correctable or uncorrectable register + *			indicating what error or errors have been seen + * u8 severity -	error severity 0:NONFATAL 1:FATAL 2:CORRECTED + */ + +#define aer_correctable_errors		\ +	{BIT(0),	"Receiver Error"},		\ +	{BIT(6),	"Bad TLP"},			\ +	{BIT(7),	"Bad DLLP"},			\ +	{BIT(8),	"RELAY_NUM Rollover"},		\ +	{BIT(12),	"Replay Timer Timeout"},	\ +	{BIT(13),	"Advisory Non-Fatal"} + +#define aer_uncorrectable_errors		\ +	{BIT(4),	"Data Link Protocol"},		\ +	{BIT(12),	"Poisoned TLP"},		\ +	{BIT(13),	"Flow Control Protocol"},	\ +	{BIT(14),	"Completion Timeout"},		\ +	{BIT(15),	"Completer Abort"},		\ +	{BIT(16),	"Unexpected Completion"},	\ +	{BIT(17),	"Receiver Overflow"},		\ +	{BIT(18),	"Malformed TLP"},		\ +	{BIT(19),	"ECRC"},			\ +	{BIT(20),	"Unsupported Request"} + +TRACE_EVENT(aer_event, +	TP_PROTO(const char *dev_name, +		 const u32 status, +		 const u8 severity), + +	TP_ARGS(dev_name, status, severity), + +	TP_STRUCT__entry( +		__string(	dev_name,	dev_name	) +		__field(	u32,		status		) +		__field(	u8,		severity	) +	), + +	TP_fast_assign( +		__assign_str(dev_name, dev_name); +		__entry->status		= status; +		__entry->severity	= severity; +	), + +	TP_printk("%s PCIe Bus Error: severity=%s, %s\n", +		__get_str(dev_name), +		__entry->severity == AER_CORRECTABLE ? "Corrected" : +			__entry->severity == AER_FATAL ? +			"Fatal" : "Uncorrected, non-fatal", +		__entry->severity == AER_CORRECTABLE ? 
+		__print_flags(__entry->status, "|", aer_correctable_errors) : +		__print_flags(__entry->status, "|", aer_uncorrectable_errors)) +); + +#endif /* _TRACE_AER_H */ + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/include/trace/events/rcu.h b/include/trace/events/rcu.h new file mode 100644 index 00000000000..aca38226641 --- /dev/null +++ b/include/trace/events/rcu.h @@ -0,0 +1,737 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM rcu + +#if !defined(_TRACE_RCU_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_RCU_H + +#include <linux/tracepoint.h> + +/* + * Tracepoint for start/end markers used for utilization calculations. + * By convention, the string is of the following forms: + * + * "Start <activity>" -- Mark the start of the specified activity, + *			 such as "context switch".  Nesting is permitted. + * "End <activity>" -- Mark the end of the specified activity. + * + * An "@" character within "<activity>" is a comment character: Data + * reduction scripts will ignore the "@" and the remainder of the line. + */ +TRACE_EVENT(rcu_utilization, + +	TP_PROTO(const char *s), + +	TP_ARGS(s), + +	TP_STRUCT__entry( +		__field(const char *, s) +	), + +	TP_fast_assign( +		__entry->s = s; +	), + +	TP_printk("%s", __entry->s) +); + +#ifdef CONFIG_RCU_TRACE + +#if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU) + +/* + * Tracepoint for grace-period events.  Takes a string identifying the + * RCU flavor, the grace-period number, and a string identifying the + * grace-period-related event as follows: + * + *	"AccReadyCB": CPU acclerates new callbacks to RCU_NEXT_READY_TAIL. + *	"AccWaitCB": CPU accelerates new callbacks to RCU_WAIT_TAIL. + *	"newreq": Request a new grace period. + *	"start": Start a grace period. + *	"cpustart": CPU first notices a grace-period start. + *	"cpuqs": CPU passes through a quiescent state. + *	"cpuonl": CPU comes online. + *	"cpuofl": CPU goes offline. 
+ *	"reqwait": GP kthread sleeps waiting for grace-period request. + *	"reqwaitsig": GP kthread awakened by signal from reqwait state. + *	"fqswait": GP kthread waiting until time to force quiescent states. + *	"fqsstart": GP kthread starts forcing quiescent states. + *	"fqsend": GP kthread done forcing quiescent states. + *	"fqswaitsig": GP kthread awakened by signal from fqswait state. + *	"end": End a grace period. + *	"cpuend": CPU first notices a grace-period end. + */ +TRACE_EVENT(rcu_grace_period, + +	TP_PROTO(const char *rcuname, unsigned long gpnum, const char *gpevent), + +	TP_ARGS(rcuname, gpnum, gpevent), + +	TP_STRUCT__entry( +		__field(const char *, rcuname) +		__field(unsigned long, gpnum) +		__field(const char *, gpevent) +	), + +	TP_fast_assign( +		__entry->rcuname = rcuname; +		__entry->gpnum = gpnum; +		__entry->gpevent = gpevent; +	), + +	TP_printk("%s %lu %s", +		  __entry->rcuname, __entry->gpnum, __entry->gpevent) +); + +/* + * Tracepoint for future grace-period events, including those for no-callbacks + * CPUs.  The caller should pull the data from the rcu_node structure, + * other than rcuname, which comes from the rcu_state structure, and event, + * which is one of the following: + * + * "Startleaf": Request a nocb grace period based on leaf-node data. + * "Startedleaf": Leaf-node start proved sufficient. + * "Startedleafroot": Leaf-node start proved sufficient after checking root. + * "Startedroot": Requested a nocb grace period based on root-node data. + * "StartWait": Start waiting for the requested grace period. + * "ResumeWait": Resume waiting after signal. + * "EndWait": Complete wait. + * "Cleanup": Clean up rcu_node structure after previous GP. + * "CleanupMore": Clean up, and another no-CB GP is needed. 
+ */ +TRACE_EVENT(rcu_future_grace_period, + +	TP_PROTO(const char *rcuname, unsigned long gpnum, unsigned long completed, +		 unsigned long c, u8 level, int grplo, int grphi, +		 const char *gpevent), + +	TP_ARGS(rcuname, gpnum, completed, c, level, grplo, grphi, gpevent), + +	TP_STRUCT__entry( +		__field(const char *, rcuname) +		__field(unsigned long, gpnum) +		__field(unsigned long, completed) +		__field(unsigned long, c) +		__field(u8, level) +		__field(int, grplo) +		__field(int, grphi) +		__field(const char *, gpevent) +	), + +	TP_fast_assign( +		__entry->rcuname = rcuname; +		__entry->gpnum = gpnum; +		__entry->completed = completed; +		__entry->c = c; +		__entry->level = level; +		__entry->grplo = grplo; +		__entry->grphi = grphi; +		__entry->gpevent = gpevent; +	), + +	TP_printk("%s %lu %lu %lu %u %d %d %s", +		  __entry->rcuname, __entry->gpnum, __entry->completed, +		  __entry->c, __entry->level, __entry->grplo, __entry->grphi, +		  __entry->gpevent) +); + +/* + * Tracepoint for grace-period-initialization events.  These are + * distinguished by the type of RCU, the new grace-period number, the + * rcu_node structure level, the starting and ending CPU covered by the + * rcu_node structure, and the mask of CPUs that will be waited for. + * All but the type of RCU are extracted from the rcu_node structure. 
+ */ +TRACE_EVENT(rcu_grace_period_init, + +	TP_PROTO(const char *rcuname, unsigned long gpnum, u8 level, +		 int grplo, int grphi, unsigned long qsmask), + +	TP_ARGS(rcuname, gpnum, level, grplo, grphi, qsmask), + +	TP_STRUCT__entry( +		__field(const char *, rcuname) +		__field(unsigned long, gpnum) +		__field(u8, level) +		__field(int, grplo) +		__field(int, grphi) +		__field(unsigned long, qsmask) +	), + +	TP_fast_assign( +		__entry->rcuname = rcuname; +		__entry->gpnum = gpnum; +		__entry->level = level; +		__entry->grplo = grplo; +		__entry->grphi = grphi; +		__entry->qsmask = qsmask; +	), + +	TP_printk("%s %lu %u %d %d %lx", +		  __entry->rcuname, __entry->gpnum, __entry->level, +		  __entry->grplo, __entry->grphi, __entry->qsmask) +); + +/* + * Tracepoint for RCU no-CBs CPU callback handoffs.  This event is intended + * to assist debugging of these handoffs. + * + * The first argument is the name of the RCU flavor, and the second is + * the number of the offloaded CPU are extracted.  The third and final + * argument is a string as follows: + * + *	"WakeEmpty": Wake rcuo kthread, first CB to empty list. + *	"WakeOvf": Wake rcuo kthread, CB list is huge. + *	"WakeNot": Don't wake rcuo kthread. + *	"WakeNotPoll": Don't wake rcuo kthread because it is polling. + *	"Poll": Start of new polling cycle for rcu_nocb_poll. + *	"Sleep": Sleep waiting for CBs for !rcu_nocb_poll. + *	"WokeEmpty": rcuo kthread woke to find empty list. + *	"WokeNonEmpty": rcuo kthread woke to find non-empty list. + *	"WaitQueue": Enqueue partially done, timed wait for it to complete. + *	"WokeQueue": Partial enqueue now complete. 
+ */ +TRACE_EVENT(rcu_nocb_wake, + +	TP_PROTO(const char *rcuname, int cpu, const char *reason), + +	TP_ARGS(rcuname, cpu, reason), + +	TP_STRUCT__entry( +		__field(const char *, rcuname) +		__field(int, cpu) +		__field(const char *, reason) +	), + +	TP_fast_assign( +		__entry->rcuname = rcuname; +		__entry->cpu = cpu; +		__entry->reason = reason; +	), + +	TP_printk("%s %d %s", __entry->rcuname, __entry->cpu, __entry->reason) +); + +/* + * Tracepoint for tasks blocking within preemptible-RCU read-side + * critical sections.  Track the type of RCU (which one day might + * include SRCU), the grace-period number that the task is blocking + * (the current or the next), and the task's PID. + */ +TRACE_EVENT(rcu_preempt_task, + +	TP_PROTO(const char *rcuname, int pid, unsigned long gpnum), + +	TP_ARGS(rcuname, pid, gpnum), + +	TP_STRUCT__entry( +		__field(const char *, rcuname) +		__field(unsigned long, gpnum) +		__field(int, pid) +	), + +	TP_fast_assign( +		__entry->rcuname = rcuname; +		__entry->gpnum = gpnum; +		__entry->pid = pid; +	), + +	TP_printk("%s %lu %d", +		  __entry->rcuname, __entry->gpnum, __entry->pid) +); + +/* + * Tracepoint for tasks that blocked within a given preemptible-RCU + * read-side critical section exiting that critical section.  Track the + * type of RCU (which one day might include SRCU) and the task's PID. + */ +TRACE_EVENT(rcu_unlock_preempted_task, + +	TP_PROTO(const char *rcuname, unsigned long gpnum, int pid), + +	TP_ARGS(rcuname, gpnum, pid), + +	TP_STRUCT__entry( +		__field(const char *, rcuname) +		__field(unsigned long, gpnum) +		__field(int, pid) +	), + +	TP_fast_assign( +		__entry->rcuname = rcuname; +		__entry->gpnum = gpnum; +		__entry->pid = pid; +	), + +	TP_printk("%s %lu %d", __entry->rcuname, __entry->gpnum, __entry->pid) +); + +/* + * Tracepoint for quiescent-state-reporting events.  
These are + * distinguished by the type of RCU, the grace-period number, the + * mask of quiescent lower-level entities, the rcu_node structure level, + * the starting and ending CPU covered by the rcu_node structure, and + * whether there are any blocked tasks blocking the current grace period. + * All but the type of RCU are extracted from the rcu_node structure. + */ +TRACE_EVENT(rcu_quiescent_state_report, + +	TP_PROTO(const char *rcuname, unsigned long gpnum, +		 unsigned long mask, unsigned long qsmask, +		 u8 level, int grplo, int grphi, int gp_tasks), + +	TP_ARGS(rcuname, gpnum, mask, qsmask, level, grplo, grphi, gp_tasks), + +	TP_STRUCT__entry( +		__field(const char *, rcuname) +		__field(unsigned long, gpnum) +		__field(unsigned long, mask) +		__field(unsigned long, qsmask) +		__field(u8, level) +		__field(int, grplo) +		__field(int, grphi) +		__field(u8, gp_tasks) +	), + +	TP_fast_assign( +		__entry->rcuname = rcuname; +		__entry->gpnum = gpnum; +		__entry->mask = mask; +		__entry->qsmask = qsmask; +		__entry->level = level; +		__entry->grplo = grplo; +		__entry->grphi = grphi; +		__entry->gp_tasks = gp_tasks; +	), + +	TP_printk("%s %lu %lx>%lx %u %d %d %u", +		  __entry->rcuname, __entry->gpnum, +		  __entry->mask, __entry->qsmask, __entry->level, +		  __entry->grplo, __entry->grphi, __entry->gp_tasks) +); + +/* + * Tracepoint for quiescent states detected by force_quiescent_state(). + * These trace events include the type of RCU, the grace-period number + * that was blocked by the CPU, the CPU itself, and the type of quiescent + * state, which can be "dti" for dyntick-idle mode, "ofl" for CPU offline, + * or "kick" when kicking a CPU that has been in dyntick-idle mode for + * too long. 
+ */ +TRACE_EVENT(rcu_fqs, + +	TP_PROTO(const char *rcuname, unsigned long gpnum, int cpu, const char *qsevent), + +	TP_ARGS(rcuname, gpnum, cpu, qsevent), + +	TP_STRUCT__entry( +		__field(const char *, rcuname) +		__field(unsigned long, gpnum) +		__field(int, cpu) +		__field(const char *, qsevent) +	), + +	TP_fast_assign( +		__entry->rcuname = rcuname; +		__entry->gpnum = gpnum; +		__entry->cpu = cpu; +		__entry->qsevent = qsevent; +	), + +	TP_printk("%s %lu %d %s", +		  __entry->rcuname, __entry->gpnum, +		  __entry->cpu, __entry->qsevent) +); + +#endif /* #if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU) */ + +/* + * Tracepoint for dyntick-idle entry/exit events.  These take a string + * as argument: "Start" for entering dyntick-idle mode, "End" for + * leaving it, "--=" for events moving towards idle, and "++=" for events + * moving away from idle.  "Error on entry: not idle task" and "Error on + * exit: not idle task" indicate that a non-idle task is erroneously + * toying with the idle loop. + * + * These events also take a pair of numbers, which indicate the nesting + * depth before and after the event of interest.  Note that task-related + * events use the upper bits of each number, while interrupt-related + * events use the lower bits. + */ +TRACE_EVENT(rcu_dyntick, + +	TP_PROTO(const char *polarity, long long oldnesting, long long newnesting), + +	TP_ARGS(polarity, oldnesting, newnesting), + +	TP_STRUCT__entry( +		__field(const char *, polarity) +		__field(long long, oldnesting) +		__field(long long, newnesting) +	), + +	TP_fast_assign( +		__entry->polarity = polarity; +		__entry->oldnesting = oldnesting; +		__entry->newnesting = newnesting; +	), + +	TP_printk("%s %llx %llx", __entry->polarity, +		  __entry->oldnesting, __entry->newnesting) +); + +/* + * Tracepoint for RCU preparation for idle, the goal being to get RCU + * processing done so that the current CPU can shut off its scheduling + * clock and enter dyntick-idle mode.  
One way to accomplish this is + * to drain all RCU callbacks from this CPU, and the other is to have + * done everything RCU requires for the current grace period.  In this + * latter case, the CPU will be awakened at the end of the current grace + * period in order to process the remainder of its callbacks. + * + * These tracepoints take a string as argument: + * + *	"No callbacks": Nothing to do, no callbacks on this CPU. + *	"In holdoff": Nothing to do, holding off after unsuccessful attempt. + *	"Begin holdoff": Attempt failed, don't retry until next jiffy. + *	"Dyntick with callbacks": Entering dyntick-idle despite callbacks. + *	"Dyntick with lazy callbacks": Entering dyntick-idle w/lazy callbacks. + *	"More callbacks": Still more callbacks, try again to clear them out. + *	"Callbacks drained": All callbacks processed, off to dyntick idle! + *	"Timer": Timer fired to cause CPU to continue processing callbacks. + *	"Demigrate": Timer fired on wrong CPU, woke up correct CPU. + *	"Cleanup after idle": Idle exited, timer canceled. + */ +TRACE_EVENT(rcu_prep_idle, + +	TP_PROTO(const char *reason), + +	TP_ARGS(reason), + +	TP_STRUCT__entry( +		__field(const char *, reason) +	), + +	TP_fast_assign( +		__entry->reason = reason; +	), + +	TP_printk("%s", __entry->reason) +); + +/* + * Tracepoint for the registration of a single RCU callback function. + * The first argument is the type of RCU, the second argument is + * a pointer to the RCU callback itself, the third element is the + * number of lazy callbacks queued, and the fourth element is the + * total number of callbacks queued. 
+ */ +TRACE_EVENT(rcu_callback, + +	TP_PROTO(const char *rcuname, struct rcu_head *rhp, long qlen_lazy, +		 long qlen), + +	TP_ARGS(rcuname, rhp, qlen_lazy, qlen), + +	TP_STRUCT__entry( +		__field(const char *, rcuname) +		__field(void *, rhp) +		__field(void *, func) +		__field(long, qlen_lazy) +		__field(long, qlen) +	), + +	TP_fast_assign( +		__entry->rcuname = rcuname; +		__entry->rhp = rhp; +		__entry->func = rhp->func; +		__entry->qlen_lazy = qlen_lazy; +		__entry->qlen = qlen; +	), + +	TP_printk("%s rhp=%p func=%pf %ld/%ld", +		  __entry->rcuname, __entry->rhp, __entry->func, +		  __entry->qlen_lazy, __entry->qlen) +); + +/* + * Tracepoint for the registration of a single RCU callback of the special + * kfree() form.  The first argument is the RCU type, the second argument + * is a pointer to the RCU callback, the third argument is the offset + * of the callback within the enclosing RCU-protected data structure, + * the fourth argument is the number of lazy callbacks queued, and the + * fifth argument is the total number of callbacks queued. + */ +TRACE_EVENT(rcu_kfree_callback, + +	TP_PROTO(const char *rcuname, struct rcu_head *rhp, unsigned long offset, +		 long qlen_lazy, long qlen), + +	TP_ARGS(rcuname, rhp, offset, qlen_lazy, qlen), + +	TP_STRUCT__entry( +		__field(const char *, rcuname) +		__field(void *, rhp) +		__field(unsigned long, offset) +		__field(long, qlen_lazy) +		__field(long, qlen) +	), + +	TP_fast_assign( +		__entry->rcuname = rcuname; +		__entry->rhp = rhp; +		__entry->offset = offset; +		__entry->qlen_lazy = qlen_lazy; +		__entry->qlen = qlen; +	), + +	TP_printk("%s rhp=%p func=%ld %ld/%ld", +		  __entry->rcuname, __entry->rhp, __entry->offset, +		  __entry->qlen_lazy, __entry->qlen) +); + +/* + * Tracepoint for marking the beginning rcu_do_batch, performed to start + * RCU callback invocation.  
The first argument is the RCU flavor, + * the second is the number of lazy callbacks queued, the third is + * the total number of callbacks queued, and the fourth argument is + * the current RCU-callback batch limit. + */ +TRACE_EVENT(rcu_batch_start, + +	TP_PROTO(const char *rcuname, long qlen_lazy, long qlen, long blimit), + +	TP_ARGS(rcuname, qlen_lazy, qlen, blimit), + +	TP_STRUCT__entry( +		__field(const char *, rcuname) +		__field(long, qlen_lazy) +		__field(long, qlen) +		__field(long, blimit) +	), + +	TP_fast_assign( +		__entry->rcuname = rcuname; +		__entry->qlen_lazy = qlen_lazy; +		__entry->qlen = qlen; +		__entry->blimit = blimit; +	), + +	TP_printk("%s CBs=%ld/%ld bl=%ld", +		  __entry->rcuname, __entry->qlen_lazy, __entry->qlen, +		  __entry->blimit) +); + +/* + * Tracepoint for the invocation of a single RCU callback function. + * The first argument is the type of RCU, and the second argument is + * a pointer to the RCU callback itself. + */ +TRACE_EVENT(rcu_invoke_callback, + +	TP_PROTO(const char *rcuname, struct rcu_head *rhp), + +	TP_ARGS(rcuname, rhp), + +	TP_STRUCT__entry( +		__field(const char *, rcuname) +		__field(void *, rhp) +		__field(void *, func) +	), + +	TP_fast_assign( +		__entry->rcuname = rcuname; +		__entry->rhp = rhp; +		__entry->func = rhp->func; +	), + +	TP_printk("%s rhp=%p func=%pf", +		  __entry->rcuname, __entry->rhp, __entry->func) +); + +/* + * Tracepoint for the invocation of a single RCU callback of the special + * kfree() form.  The first argument is the RCU flavor, the second + * argument is a pointer to the RCU callback, and the third argument + * is the offset of the callback within the enclosing RCU-protected + * data structure. 
+ */ +TRACE_EVENT(rcu_invoke_kfree_callback, + +	TP_PROTO(const char *rcuname, struct rcu_head *rhp, unsigned long offset), + +	TP_ARGS(rcuname, rhp, offset), + +	TP_STRUCT__entry( +		__field(const char *, rcuname) +		__field(void *, rhp) +		__field(unsigned long, offset) +	), + +	TP_fast_assign( +		__entry->rcuname = rcuname; +		__entry->rhp = rhp; +		__entry->offset	= offset; +	), + +	TP_printk("%s rhp=%p func=%ld", +		  __entry->rcuname, __entry->rhp, __entry->offset) +); + +/* + * Tracepoint for exiting rcu_do_batch after RCU callbacks have been + * invoked.  The first argument is the name of the RCU flavor, + * the second argument is number of callbacks actually invoked, + * the third argument (cb) is whether or not any of the callbacks that + * were ready to invoke at the beginning of this batch are still + * queued, the fourth argument (nr) is the return value of need_resched(), + * the fifth argument (iit) is 1 if the current task is the idle task, + * and the sixth argument (risk) is the return value from + * rcu_is_callbacks_kthread(). + */ +TRACE_EVENT(rcu_batch_end, + +	TP_PROTO(const char *rcuname, int callbacks_invoked, +		 char cb, char nr, char iit, char risk), + +	TP_ARGS(rcuname, callbacks_invoked, cb, nr, iit, risk), + +	TP_STRUCT__entry( +		__field(const char *, rcuname) +		__field(int, callbacks_invoked) +		__field(char, cb) +		__field(char, nr) +		__field(char, iit) +		__field(char, risk) +	), + +	TP_fast_assign( +		__entry->rcuname = rcuname; +		__entry->callbacks_invoked = callbacks_invoked; +		__entry->cb = cb; +		__entry->nr = nr; +		__entry->iit = iit; +		__entry->risk = risk; +	), + +	TP_printk("%s CBs-invoked=%d idle=%c%c%c%c", +		  __entry->rcuname, __entry->callbacks_invoked, +		  __entry->cb ? 'C' : '.', +		  __entry->nr ? 'S' : '.', +		  __entry->iit ? 'I' : '.', +		  __entry->risk ? 'R' : '.') +); + +/* + * Tracepoint for rcutorture readers.  
The first argument is the name + * of the RCU flavor from rcutorture's viewpoint and the second argument + * is the callback address. + */ +TRACE_EVENT(rcu_torture_read, + +	TP_PROTO(const char *rcutorturename, struct rcu_head *rhp, +		 unsigned long secs, unsigned long c_old, unsigned long c), + +	TP_ARGS(rcutorturename, rhp, secs, c_old, c), + +	TP_STRUCT__entry( +		__field(const char *, rcutorturename) +		__field(struct rcu_head *, rhp) +		__field(unsigned long, secs) +		__field(unsigned long, c_old) +		__field(unsigned long, c) +	), + +	TP_fast_assign( +		__entry->rcutorturename = rcutorturename; +		__entry->rhp = rhp; +		__entry->secs = secs; +		__entry->c_old = c_old; +		__entry->c = c; +	), + +	TP_printk("%s torture read %p %luus c: %lu %lu", +		  __entry->rcutorturename, __entry->rhp, +		  __entry->secs, __entry->c_old, __entry->c) +); + +/* + * Tracepoint for _rcu_barrier() execution.  The string "s" describes + * the _rcu_barrier phase: + *	"Begin": rcu_barrier_callback() started. + *	"Check": rcu_barrier_callback() checking for piggybacking. + *	"EarlyExit": rcu_barrier_callback() piggybacked, thus early exit. + *	"Inc1": rcu_barrier_callback() piggyback check counter incremented. + *	"Offline": rcu_barrier_callback() found offline CPU + *	"OnlineNoCB": rcu_barrier_callback() found online no-CBs CPU. + *	"OnlineQ": rcu_barrier_callback() found online CPU with callbacks. + *	"OnlineNQ": rcu_barrier_callback() found online CPU, no callbacks. + *	"IRQ": An rcu_barrier_callback() callback posted on remote CPU. + *	"CB": An rcu_barrier_callback() invoked a callback, not the last. + *	"LastCB": An rcu_barrier_callback() invoked the last callback. + *	"Inc2": rcu_barrier_callback() piggyback check counter incremented. + * The "cpu" argument is the CPU or -1 if meaningless, the "cnt" argument + * is the count of remaining callbacks, and "done" is the piggybacking count. 
+ */ +TRACE_EVENT(rcu_barrier, + +	TP_PROTO(const char *rcuname, const char *s, int cpu, int cnt, unsigned long done), + +	TP_ARGS(rcuname, s, cpu, cnt, done), + +	TP_STRUCT__entry( +		__field(const char *, rcuname) +		__field(const char *, s) +		__field(int, cpu) +		__field(int, cnt) +		__field(unsigned long, done) +	), + +	TP_fast_assign( +		__entry->rcuname = rcuname; +		__entry->s = s; +		__entry->cpu = cpu; +		__entry->cnt = cnt; +		__entry->done = done; +	), + +	TP_printk("%s %s cpu %d remaining %d # %lu", +		  __entry->rcuname, __entry->s, __entry->cpu, __entry->cnt, +		  __entry->done) +); + +#else /* #ifdef CONFIG_RCU_TRACE */ + +#define trace_rcu_grace_period(rcuname, gpnum, gpevent) do { } while (0) +#define trace_rcu_grace_period_init(rcuname, gpnum, level, grplo, grphi, \ +				    qsmask) do { } while (0) +#define trace_rcu_future_grace_period(rcuname, gpnum, completed, c, \ +				      level, grplo, grphi, event) \ +				      do { } while (0) +#define trace_rcu_nocb_wake(rcuname, cpu, reason) do { } while (0) +#define trace_rcu_preempt_task(rcuname, pid, gpnum) do { } while (0) +#define trace_rcu_unlock_preempted_task(rcuname, gpnum, pid) do { } while (0) +#define trace_rcu_quiescent_state_report(rcuname, gpnum, mask, qsmask, level, \ +					 grplo, grphi, gp_tasks) do { } \ +	while (0) +#define trace_rcu_fqs(rcuname, gpnum, cpu, qsevent) do { } while (0) +#define trace_rcu_dyntick(polarity, oldnesting, newnesting) do { } while (0) +#define trace_rcu_prep_idle(reason) do { } while (0) +#define trace_rcu_callback(rcuname, rhp, qlen_lazy, qlen) do { } while (0) +#define trace_rcu_kfree_callback(rcuname, rhp, offset, qlen_lazy, qlen) \ +	do { } while (0) +#define trace_rcu_batch_start(rcuname, qlen_lazy, qlen, blimit) \ +	do { } while (0) +#define trace_rcu_invoke_callback(rcuname, rhp) do { } while (0) +#define trace_rcu_invoke_kfree_callback(rcuname, rhp, offset) do { } while (0) +#define trace_rcu_batch_end(rcuname, callbacks_invoked, cb, nr, iit, risk) 
\ +	do { } while (0) +#define trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c) \ +	do { } while (0) +#define trace_rcu_barrier(name, s, cpu, cnt, done) do { } while (0) + +#endif /* #else #ifdef CONFIG_RCU_TRACE */ + +#endif /* _TRACE_RCU_H */ + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/include/trace/events/regmap.h b/include/trace/events/regmap.h new file mode 100644 index 00000000000..23d561512f6 --- /dev/null +++ b/include/trace/events/regmap.h @@ -0,0 +1,252 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM regmap + +#if !defined(_TRACE_REGMAP_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_REGMAP_H + +#include <linux/ktime.h> +#include <linux/tracepoint.h> + +struct device; +struct regmap; + +/* + * Log register events + */ +DECLARE_EVENT_CLASS(regmap_reg, + +	TP_PROTO(struct device *dev, unsigned int reg, +		 unsigned int val), + +	TP_ARGS(dev, reg, val), + +	TP_STRUCT__entry( +		__string(	name,		dev_name(dev)	) +		__field(	unsigned int,	reg		) +		__field(	unsigned int,	val		) +	), + +	TP_fast_assign( +		__assign_str(name, dev_name(dev)); +		__entry->reg = reg; +		__entry->val = val; +	), + +	TP_printk("%s reg=%x val=%x", __get_str(name), +		  (unsigned int)__entry->reg, +		  (unsigned int)__entry->val) +); + +DEFINE_EVENT(regmap_reg, regmap_reg_write, + +	TP_PROTO(struct device *dev, unsigned int reg, +		 unsigned int val), + +	TP_ARGS(dev, reg, val) + +); + +DEFINE_EVENT(regmap_reg, regmap_reg_read, + +	TP_PROTO(struct device *dev, unsigned int reg, +		 unsigned int val), + +	TP_ARGS(dev, reg, val) + +); + +DEFINE_EVENT(regmap_reg, regmap_reg_read_cache, + +	TP_PROTO(struct device *dev, unsigned int reg, +		 unsigned int val), + +	TP_ARGS(dev, reg, val) + +); + +DECLARE_EVENT_CLASS(regmap_block, + +	TP_PROTO(struct device *dev, unsigned int reg, int count), + +	TP_ARGS(dev, reg, count), + +	TP_STRUCT__entry( +		__string(	name,		dev_name(dev)	) +		__field(	unsigned int,	reg		) +		__field(	int,		
count		) +	), + +	TP_fast_assign( +		__assign_str(name, dev_name(dev)); +		__entry->reg = reg; +		__entry->count = count; +	), + +	TP_printk("%s reg=%x count=%d", __get_str(name), +		  (unsigned int)__entry->reg, +		  (int)__entry->count) +); + +DEFINE_EVENT(regmap_block, regmap_hw_read_start, + +	TP_PROTO(struct device *dev, unsigned int reg, int count), + +	TP_ARGS(dev, reg, count) +); + +DEFINE_EVENT(regmap_block, regmap_hw_read_done, + +	TP_PROTO(struct device *dev, unsigned int reg, int count), + +	TP_ARGS(dev, reg, count) +); + +DEFINE_EVENT(regmap_block, regmap_hw_write_start, + +	TP_PROTO(struct device *dev, unsigned int reg, int count), + +	TP_ARGS(dev, reg, count) +); + +DEFINE_EVENT(regmap_block, regmap_hw_write_done, + +	TP_PROTO(struct device *dev, unsigned int reg, int count), + +	TP_ARGS(dev, reg, count) +); + +TRACE_EVENT(regcache_sync, + +	TP_PROTO(struct device *dev, const char *type, +		 const char *status), + +	TP_ARGS(dev, type, status), + +	TP_STRUCT__entry( +		__string(       name,           dev_name(dev)   ) +		__string(	status,		status		) +		__string(	type,		type		) +		__field(	int,		type		) +	), + +	TP_fast_assign( +		__assign_str(name, dev_name(dev)); +		__assign_str(status, status); +		__assign_str(type, type); +	), + +	TP_printk("%s type=%s status=%s", __get_str(name), +		  __get_str(type), __get_str(status)) +); + +DECLARE_EVENT_CLASS(regmap_bool, + +	TP_PROTO(struct device *dev, bool flag), + +	TP_ARGS(dev, flag), + +	TP_STRUCT__entry( +		__string(	name,		dev_name(dev)	) +		__field(	int,		flag		) +	), + +	TP_fast_assign( +		__assign_str(name, dev_name(dev)); +		__entry->flag = flag; +	), + +	TP_printk("%s flag=%d", __get_str(name), +		  (int)__entry->flag) +); + +DEFINE_EVENT(regmap_bool, regmap_cache_only, + +	TP_PROTO(struct device *dev, bool flag), + +	TP_ARGS(dev, flag) + +); + +DEFINE_EVENT(regmap_bool, regmap_cache_bypass, + +	TP_PROTO(struct device *dev, bool flag), + +	TP_ARGS(dev, flag) + +); + 
+DECLARE_EVENT_CLASS(regmap_async, + +	TP_PROTO(struct device *dev), + +	TP_ARGS(dev), + +	TP_STRUCT__entry( +		__string(	name,		dev_name(dev)	) +	), + +	TP_fast_assign( +		__assign_str(name, dev_name(dev)); +	), + +	TP_printk("%s", __get_str(name)) +); + +DEFINE_EVENT(regmap_block, regmap_async_write_start, + +	TP_PROTO(struct device *dev, unsigned int reg, int count), + +	TP_ARGS(dev, reg, count) +); + +DEFINE_EVENT(regmap_async, regmap_async_io_complete, + +	TP_PROTO(struct device *dev), + +	TP_ARGS(dev) + +); + +DEFINE_EVENT(regmap_async, regmap_async_complete_start, + +	TP_PROTO(struct device *dev), + +	TP_ARGS(dev) + +); + +DEFINE_EVENT(regmap_async, regmap_async_complete_done, + +	TP_PROTO(struct device *dev), + +	TP_ARGS(dev) + +); + +TRACE_EVENT(regcache_drop_region, + +	TP_PROTO(struct device *dev, unsigned int from, +		 unsigned int to), + +	TP_ARGS(dev, from, to), + +	TP_STRUCT__entry( +		__string(       name,           dev_name(dev)   ) +		__field(	unsigned int,	from		) +		__field(	unsigned int,	to		) +	), + +	TP_fast_assign( +		__assign_str(name, dev_name(dev)); +		__entry->from = from; +		__entry->to = to; +	), + +	TP_printk("%s %u-%u", __get_str(name), (unsigned int)__entry->from, +		  (unsigned int)__entry->to) +); + +#endif /* _TRACE_REGMAP_H */ + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/include/trace/events/regulator.h b/include/trace/events/regulator.h new file mode 100644 index 00000000000..37502a7404b --- /dev/null +++ b/include/trace/events/regulator.h @@ -0,0 +1,141 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM regulator + +#if !defined(_TRACE_REGULATOR_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_REGULATOR_H + +#include <linux/ktime.h> +#include <linux/tracepoint.h> + +/* + * Events which just log themselves and the regulator name for enable/disable + * type tracking. 
+ */ +DECLARE_EVENT_CLASS(regulator_basic, + +	TP_PROTO(const char *name), + +	TP_ARGS(name), + +	TP_STRUCT__entry( +		__string(	name,	name	) +	), + +	TP_fast_assign( +		__assign_str(name, name); +	), + +	TP_printk("name=%s", __get_str(name)) + +); + +DEFINE_EVENT(regulator_basic, regulator_enable, + +	TP_PROTO(const char *name), + +	TP_ARGS(name) + +); + +DEFINE_EVENT(regulator_basic, regulator_enable_delay, + +	TP_PROTO(const char *name), + +	TP_ARGS(name) + +); + +DEFINE_EVENT(regulator_basic, regulator_enable_complete, + +	TP_PROTO(const char *name), + +	TP_ARGS(name) + +); + +DEFINE_EVENT(regulator_basic, regulator_disable, + +	TP_PROTO(const char *name), + +	TP_ARGS(name) + +); + +DEFINE_EVENT(regulator_basic, regulator_disable_complete, + +	TP_PROTO(const char *name), + +	TP_ARGS(name) + +); + +/* + * Events that take a range of numerical values, mostly for voltages + * and so on. + */ +DECLARE_EVENT_CLASS(regulator_range, + +	TP_PROTO(const char *name, int min, int max), + +	TP_ARGS(name, min, max), + +	TP_STRUCT__entry( +		__string(	name,		name		) +		__field(        int,            min             ) +		__field(        int,            max             ) +	), + +	TP_fast_assign( +		__assign_str(name, name); +		__entry->min  = min; +		__entry->max  = max; +	), + +	TP_printk("name=%s (%d-%d)", __get_str(name), +		  (int)__entry->min, (int)__entry->max) +); + +DEFINE_EVENT(regulator_range, regulator_set_voltage, + +	TP_PROTO(const char *name, int min, int max), + +	TP_ARGS(name, min, max) + +); + + +/* + * Events that take a single value, mostly for readback and refcounts. 
+ */ +DECLARE_EVENT_CLASS(regulator_value, + +	TP_PROTO(const char *name, unsigned int val), + +	TP_ARGS(name, val), + +	TP_STRUCT__entry( +		__string(	name,		name		) +		__field(        unsigned int,   val             ) +	), + +	TP_fast_assign( +		__assign_str(name, name); +		__entry->val  = val; +	), + +	TP_printk("name=%s, val=%u", __get_str(name), +		  (int)__entry->val) +); + +DEFINE_EVENT(regulator_value, regulator_set_voltage_complete, + +	TP_PROTO(const char *name, unsigned int value), + +	TP_ARGS(name, value) + +); + +#endif /* _TRACE_POWER_H */ + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/include/trace/events/rpm.h b/include/trace/events/rpm.h new file mode 100644 index 00000000000..33f85b68c22 --- /dev/null +++ b/include/trace/events/rpm.h @@ -0,0 +1,100 @@ + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM rpm + +#if !defined(_TRACE_RUNTIME_POWER_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_RUNTIME_POWER_H + +#include <linux/ktime.h> +#include <linux/tracepoint.h> + +struct device; + +/* + * The rpm_internal events are used for tracing some important + * runtime pm internal functions. 
+ */ +DECLARE_EVENT_CLASS(rpm_internal, + +	TP_PROTO(struct device *dev, int flags), + +	TP_ARGS(dev, flags), + +	TP_STRUCT__entry( +		__string(       name,		dev_name(dev)	) +		__field(        int,            flags           ) +		__field(        int ,   	usage_count	) +		__field(        int ,   	disable_depth   ) +		__field(        int ,   	runtime_auto	) +		__field(        int ,   	request_pending	) +		__field(        int ,   	irq_safe	) +		__field(        int ,   	child_count 	) +	), + +	TP_fast_assign( +		__assign_str(name, dev_name(dev)); +		__entry->flags = flags; +		__entry->usage_count = atomic_read( +			&dev->power.usage_count); +		__entry->disable_depth = dev->power.disable_depth; +		__entry->runtime_auto = dev->power.runtime_auto; +		__entry->request_pending = dev->power.request_pending; +		__entry->irq_safe = dev->power.irq_safe; +		__entry->child_count = atomic_read( +			&dev->power.child_count); +	), + +	TP_printk("%s flags-%x cnt-%-2d dep-%-2d auto-%-1d p-%-1d" +			" irq-%-1d child-%d", +			__get_str(name), __entry->flags, +			__entry->usage_count, +			__entry->disable_depth, +			__entry->runtime_auto, +			__entry->request_pending, +			__entry->irq_safe, +			__entry->child_count +		 ) +); +DEFINE_EVENT(rpm_internal, rpm_suspend, + +	TP_PROTO(struct device *dev, int flags), + +	TP_ARGS(dev, flags) +); +DEFINE_EVENT(rpm_internal, rpm_resume, + +	TP_PROTO(struct device *dev, int flags), + +	TP_ARGS(dev, flags) +); +DEFINE_EVENT(rpm_internal, rpm_idle, + +	TP_PROTO(struct device *dev, int flags), + +	TP_ARGS(dev, flags) +); + +TRACE_EVENT(rpm_return_int, +	TP_PROTO(struct device *dev, unsigned long ip, int ret), +	TP_ARGS(dev, ip, ret), + +	TP_STRUCT__entry( +		__string(       name,		dev_name(dev)) +		__field(	unsigned long,		ip	) +		__field(	int,			ret	) +	), + +	TP_fast_assign( +		__assign_str(name, dev_name(dev)); +		__entry->ip = ip; +		__entry->ret = ret; +	), + +	TP_printk("%pS:%s ret=%d", (void *)__entry->ip, __get_str(name), +		__entry->ret) +); + 
+#endif /* _TRACE_RUNTIME_POWER_H */ + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h index f6334782a59..0a68d5ae584 100644 --- a/include/trace/events/sched.h +++ b/include/trace/events/sched.h @@ -6,6 +6,7 @@  #include <linux/sched.h>  #include <linux/tracepoint.h> +#include <linux/binfmts.h>  /*   * Tracepoint for calling kthread_stop, performed to end a kthread: @@ -56,7 +57,7 @@ DECLARE_EVENT_CLASS(sched_wakeup_template,  	TP_PROTO(struct task_struct *p, int success), -	TP_ARGS(p, success), +	TP_ARGS(__perf_task(p), success),  	TP_STRUCT__entry(  		__array(	char,	comm,	TASK_COMM_LEN	) @@ -99,8 +100,8 @@ static inline long __trace_sched_switch_state(struct task_struct *p)  	/*  	 * For all intents and purposes a preempted task is a running task.  	 */ -	if (task_thread_info(p)->preempt_count & PREEMPT_ACTIVE) -		state = TASK_RUNNING; +	if (task_preempt_count(p) & PREEMPT_ACTIVE) +		state = TASK_RUNNING | TASK_STATE_MAX;  #endif  	return state; @@ -137,13 +138,14 @@ TRACE_EVENT(sched_switch,  		__entry->next_prio	= next->prio;  	), -	TP_printk("prev_comm=%s prev_pid=%d prev_prio=%d prev_state=%s ==> next_comm=%s next_pid=%d next_prio=%d", +	TP_printk("prev_comm=%s prev_pid=%d prev_prio=%d prev_state=%s%s ==> next_comm=%s next_pid=%d next_prio=%d",  		__entry->prev_comm, __entry->prev_pid, __entry->prev_prio, -		__entry->prev_state ? -		  __print_flags(__entry->prev_state, "|", +		__entry->prev_state & (TASK_STATE_MAX-1) ? +		  __print_flags(__entry->prev_state & (TASK_STATE_MAX-1), "|",  				{ 1, "S"} , { 2, "D" }, { 4, "T" }, { 8, "t" },  				{ 16, "Z" }, { 32, "X" }, { 64, "x" }, -				{ 128, "W" }) : "R", +				{ 128, "K" }, { 256, "W" }, { 512, "P" }) : "R", +		__entry->prev_state & TASK_STATE_MAX ? 
"+" : "",  		__entry->next_comm, __entry->next_pid, __entry->next_prio)  ); @@ -275,6 +277,32 @@ TRACE_EVENT(sched_process_fork,  );  /* + * Tracepoint for exec: + */ +TRACE_EVENT(sched_process_exec, + +	TP_PROTO(struct task_struct *p, pid_t old_pid, +		 struct linux_binprm *bprm), + +	TP_ARGS(p, old_pid, bprm), + +	TP_STRUCT__entry( +		__string(	filename,	bprm->filename	) +		__field(	pid_t,		pid		) +		__field(	pid_t,		old_pid		) +	), + +	TP_fast_assign( +		__assign_str(filename, bprm->filename); +		__entry->pid		= p->pid; +		__entry->old_pid	= old_pid; +	), + +	TP_printk("filename=%s pid=%d old_pid=%d", __get_str(filename), +		  __entry->pid, __entry->old_pid) +); + +/*   * XXX the below sched_stat tracepoints only apply to SCHED_OTHER/BATCH/IDLE   *     adding sched_stat support to SCHED_FIFO/RR would be welcome.   */ @@ -282,7 +310,7 @@ DECLARE_EVENT_CLASS(sched_stat_template,  	TP_PROTO(struct task_struct *tsk, u64 delay), -	TP_ARGS(tsk, delay), +	TP_ARGS(__perf_task(tsk), __perf_count(delay)),  	TP_STRUCT__entry(  		__array( char,	comm,	TASK_COMM_LEN	) @@ -294,9 +322,6 @@ DECLARE_EVENT_CLASS(sched_stat_template,  		memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);  		__entry->pid	= tsk->pid;  		__entry->delay	= delay; -	) -	TP_perf_assign( -		__perf_count(delay);  	),  	TP_printk("comm=%s pid=%d delay=%Lu [ns]", @@ -330,14 +355,21 @@ DEFINE_EVENT(sched_stat_template, sched_stat_iowait,  	     TP_ARGS(tsk, delay));  /* + * Tracepoint for accounting blocked time (time the task is in uninterruptible). + */ +DEFINE_EVENT(sched_stat_template, sched_stat_blocked, +	     TP_PROTO(struct task_struct *tsk, u64 delay), +	     TP_ARGS(tsk, delay)); + +/*   * Tracepoint for accounting runtime (time the task is executing   * on a CPU).   
*/ -TRACE_EVENT(sched_stat_runtime, +DECLARE_EVENT_CLASS(sched_stat_runtime,  	TP_PROTO(struct task_struct *tsk, u64 runtime, u64 vruntime), -	TP_ARGS(tsk, runtime, vruntime), +	TP_ARGS(tsk, __perf_count(runtime), vruntime),  	TP_STRUCT__entry(  		__array( char,	comm,	TASK_COMM_LEN	) @@ -351,9 +383,6 @@ TRACE_EVENT(sched_stat_runtime,  		__entry->pid		= tsk->pid;  		__entry->runtime	= runtime;  		__entry->vruntime	= vruntime; -	) -	TP_perf_assign( -		__perf_count(runtime);  	),  	TP_printk("comm=%s pid=%d runtime=%Lu [ns] vruntime=%Lu [ns]", @@ -362,6 +391,10 @@ TRACE_EVENT(sched_stat_runtime,  			(unsigned long long)__entry->vruntime)  ); +DEFINE_EVENT(sched_stat_runtime, sched_stat_runtime, +	     TP_PROTO(struct task_struct *tsk, u64 runtime, u64 vruntime), +	     TP_ARGS(tsk, runtime, vruntime)); +  /*   * Tracepoint for showing priority inheritance modifying a tasks   * priority. @@ -391,6 +424,132 @@ TRACE_EVENT(sched_pi_setprio,  			__entry->oldprio, __entry->newprio)  ); +#ifdef CONFIG_DETECT_HUNG_TASK +TRACE_EVENT(sched_process_hang, +	TP_PROTO(struct task_struct *tsk), +	TP_ARGS(tsk), + +	TP_STRUCT__entry( +		__array( char,	comm,	TASK_COMM_LEN	) +		__field( pid_t,	pid			) +	), + +	TP_fast_assign( +		memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN); +		__entry->pid = tsk->pid; +	), + +	TP_printk("comm=%s pid=%d", __entry->comm, __entry->pid) +); +#endif /* CONFIG_DETECT_HUNG_TASK */ + +DECLARE_EVENT_CLASS(sched_move_task_template, + +	TP_PROTO(struct task_struct *tsk, int src_cpu, int dst_cpu), + +	TP_ARGS(tsk, src_cpu, dst_cpu), + +	TP_STRUCT__entry( +		__field( pid_t,	pid			) +		__field( pid_t,	tgid			) +		__field( pid_t,	ngid			) +		__field( int,	src_cpu			) +		__field( int,	src_nid			) +		__field( int,	dst_cpu			) +		__field( int,	dst_nid			) +	), + +	TP_fast_assign( +		__entry->pid		= task_pid_nr(tsk); +		__entry->tgid		= task_tgid_nr(tsk); +		__entry->ngid		= task_numa_group_id(tsk); +		__entry->src_cpu	= src_cpu; +		__entry->src_nid	= 
cpu_to_node(src_cpu); +		__entry->dst_cpu	= dst_cpu; +		__entry->dst_nid	= cpu_to_node(dst_cpu); +	), + +	TP_printk("pid=%d tgid=%d ngid=%d src_cpu=%d src_nid=%d dst_cpu=%d dst_nid=%d", +			__entry->pid, __entry->tgid, __entry->ngid, +			__entry->src_cpu, __entry->src_nid, +			__entry->dst_cpu, __entry->dst_nid) +); + +/* + * Tracks migration of tasks from one runqueue to another. Can be used to + * detect if automatic NUMA balancing is bouncing between nodes + */ +DEFINE_EVENT(sched_move_task_template, sched_move_numa, +	TP_PROTO(struct task_struct *tsk, int src_cpu, int dst_cpu), + +	TP_ARGS(tsk, src_cpu, dst_cpu) +); + +DEFINE_EVENT(sched_move_task_template, sched_stick_numa, +	TP_PROTO(struct task_struct *tsk, int src_cpu, int dst_cpu), + +	TP_ARGS(tsk, src_cpu, dst_cpu) +); + +TRACE_EVENT(sched_swap_numa, + +	TP_PROTO(struct task_struct *src_tsk, int src_cpu, +		 struct task_struct *dst_tsk, int dst_cpu), + +	TP_ARGS(src_tsk, src_cpu, dst_tsk, dst_cpu), + +	TP_STRUCT__entry( +		__field( pid_t,	src_pid			) +		__field( pid_t,	src_tgid		) +		__field( pid_t,	src_ngid		) +		__field( int,	src_cpu			) +		__field( int,	src_nid			) +		__field( pid_t,	dst_pid			) +		__field( pid_t,	dst_tgid		) +		__field( pid_t,	dst_ngid		) +		__field( int,	dst_cpu			) +		__field( int,	dst_nid			) +	), + +	TP_fast_assign( +		__entry->src_pid	= task_pid_nr(src_tsk); +		__entry->src_tgid	= task_tgid_nr(src_tsk); +		__entry->src_ngid	= task_numa_group_id(src_tsk); +		__entry->src_cpu	= src_cpu; +		__entry->src_nid	= cpu_to_node(src_cpu); +		__entry->dst_pid	= task_pid_nr(dst_tsk); +		__entry->dst_tgid	= task_tgid_nr(dst_tsk); +		__entry->dst_ngid	= task_numa_group_id(dst_tsk); +		__entry->dst_cpu	= dst_cpu; +		__entry->dst_nid	= cpu_to_node(dst_cpu); +	), + +	TP_printk("src_pid=%d src_tgid=%d src_ngid=%d src_cpu=%d src_nid=%d dst_pid=%d dst_tgid=%d dst_ngid=%d dst_cpu=%d dst_nid=%d", +			__entry->src_pid, __entry->src_tgid, __entry->src_ngid, +			__entry->src_cpu, __entry->src_nid, +			
__entry->dst_pid, __entry->dst_tgid, __entry->dst_ngid, +			__entry->dst_cpu, __entry->dst_nid) +); + +/* + * Tracepoint for waking a polling cpu without an IPI. + */ +TRACE_EVENT(sched_wake_idle_without_ipi, + +	TP_PROTO(int cpu), + +	TP_ARGS(cpu), + +	TP_STRUCT__entry( +		__field(	int,	cpu	) +	), + +	TP_fast_assign( +		__entry->cpu	= cpu; +	), + +	TP_printk("cpu=%d", __entry->cpu) +);  #endif /* _TRACE_SCHED_H */  /* This part must be outside protection */ diff --git a/include/trace/events/scsi.h b/include/trace/events/scsi.h index 25fbefdf2f2..db6c93510f7 100644 --- a/include/trace/events/scsi.h +++ b/include/trace/events/scsi.h @@ -184,6 +184,17 @@  		scsi_statusbyte_name(SAM_STAT_ACA_ACTIVE),	\  		scsi_statusbyte_name(SAM_STAT_TASK_ABORTED)) +#define scsi_prot_op_name(result)	{ result, #result } +#define show_prot_op_name(val)					\ +	__print_symbolic(val,					\ +		scsi_prot_op_name(SCSI_PROT_NORMAL),		\ +		scsi_prot_op_name(SCSI_PROT_READ_INSERT),	\ +		scsi_prot_op_name(SCSI_PROT_WRITE_STRIP),	\ +		scsi_prot_op_name(SCSI_PROT_READ_STRIP),	\ +		scsi_prot_op_name(SCSI_PROT_WRITE_INSERT),	\ +		scsi_prot_op_name(SCSI_PROT_READ_PASS),		\ +		scsi_prot_op_name(SCSI_PROT_WRITE_PASS)) +  const char *scsi_trace_parse_cdb(struct trace_seq*, unsigned char*, int);  #define __parse_cdb(cdb, len) scsi_trace_parse_cdb(p, cdb, len) @@ -202,6 +213,7 @@ TRACE_EVENT(scsi_dispatch_cmd_start,  		__field( unsigned int,	cmd_len )  		__field( unsigned int,	data_sglen )  		__field( unsigned int,	prot_sglen ) +		__field( unsigned char,	prot_op )  		__dynamic_array(unsigned char,	cmnd, cmd->cmd_len)  	), @@ -214,13 +226,15 @@ TRACE_EVENT(scsi_dispatch_cmd_start,  		__entry->cmd_len	= cmd->cmd_len;  		__entry->data_sglen	= scsi_sg_count(cmd);  		__entry->prot_sglen	= scsi_prot_sg_count(cmd); +		__entry->prot_op	= scsi_get_prot_op(cmd);  		memcpy(__get_dynamic_array(cmnd), cmd->cmnd, cmd->cmd_len);  	),  	TP_printk("host_no=%u channel=%u id=%u lun=%u data_sgl=%u prot_sgl=%u" \ -		  " 
cmnd=(%s %s raw=%s)", +		  " prot_op=%s cmnd=(%s %s raw=%s)",  		  __entry->host_no, __entry->channel, __entry->id,  		  __entry->lun, __entry->data_sglen, __entry->prot_sglen, +		  show_prot_op_name(__entry->prot_op),  		  show_opcode_name(__entry->opcode),  		  __parse_cdb(__get_dynamic_array(cmnd), __entry->cmd_len),  		  __print_hex(__get_dynamic_array(cmnd), __entry->cmd_len)) @@ -242,6 +256,7 @@ TRACE_EVENT(scsi_dispatch_cmd_error,  		__field( unsigned int,	cmd_len )  		__field( unsigned int,	data_sglen )  		__field( unsigned int,	prot_sglen ) +		__field( unsigned char,	prot_op )  		__dynamic_array(unsigned char,	cmnd, cmd->cmd_len)  	), @@ -255,13 +270,15 @@ TRACE_EVENT(scsi_dispatch_cmd_error,  		__entry->cmd_len	= cmd->cmd_len;  		__entry->data_sglen	= scsi_sg_count(cmd);  		__entry->prot_sglen	= scsi_prot_sg_count(cmd); +		__entry->prot_op	= scsi_get_prot_op(cmd);  		memcpy(__get_dynamic_array(cmnd), cmd->cmnd, cmd->cmd_len);  	),  	TP_printk("host_no=%u channel=%u id=%u lun=%u data_sgl=%u prot_sgl=%u" \ -		  " cmnd=(%s %s raw=%s) rtn=%d", +		  " prot_op=%s cmnd=(%s %s raw=%s) rtn=%d",  		  __entry->host_no, __entry->channel, __entry->id,  		  __entry->lun, __entry->data_sglen, __entry->prot_sglen, +		  show_prot_op_name(__entry->prot_op),  		  show_opcode_name(__entry->opcode),  		  __parse_cdb(__get_dynamic_array(cmnd), __entry->cmd_len),  		  __print_hex(__get_dynamic_array(cmnd), __entry->cmd_len), @@ -284,6 +301,7 @@ DECLARE_EVENT_CLASS(scsi_cmd_done_timeout_template,  		__field( unsigned int,	cmd_len )  		__field( unsigned int,	data_sglen )  		__field( unsigned int,	prot_sglen ) +		__field( unsigned char,	prot_op )  		__dynamic_array(unsigned char,	cmnd, cmd->cmd_len)  	), @@ -297,14 +315,16 @@ DECLARE_EVENT_CLASS(scsi_cmd_done_timeout_template,  		__entry->cmd_len	= cmd->cmd_len;  		__entry->data_sglen	= scsi_sg_count(cmd);  		__entry->prot_sglen	= scsi_prot_sg_count(cmd); +		__entry->prot_op	= scsi_get_prot_op(cmd);  		
memcpy(__get_dynamic_array(cmnd), cmd->cmnd, cmd->cmd_len);  	),  	TP_printk("host_no=%u channel=%u id=%u lun=%u data_sgl=%u " \ -		  "prot_sgl=%u cmnd=(%s %s raw=%s) result=(driver=%s host=%s " \ -		  "message=%s status=%s)", +		  "prot_sgl=%u prot_op=%s cmnd=(%s %s raw=%s) result=(driver=" \ +		  "%s host=%s message=%s status=%s)",  		  __entry->host_no, __entry->channel, __entry->id,  		  __entry->lun, __entry->data_sglen, __entry->prot_sglen, +		  show_prot_op_name(__entry->prot_op),  		  show_opcode_name(__entry->opcode),  		  __parse_cdb(__get_dynamic_array(cmnd), __entry->cmd_len),  		  __print_hex(__get_dynamic_array(cmnd), __entry->cmd_len), diff --git a/include/trace/events/signal.h b/include/trace/events/signal.h index 17df43464df..39a8a430d90 100644 --- a/include/trace/events/signal.h +++ b/include/trace/events/signal.h @@ -23,11 +23,23 @@  		}						\  	} while (0) +#ifndef TRACE_HEADER_MULTI_READ +enum { +	TRACE_SIGNAL_DELIVERED, +	TRACE_SIGNAL_IGNORED, +	TRACE_SIGNAL_ALREADY_PENDING, +	TRACE_SIGNAL_OVERFLOW_FAIL, +	TRACE_SIGNAL_LOSE_INFO, +}; +#endif +  /**   * signal_generate - called when a signal is generated   * @sig: signal number   * @info: pointer to struct siginfo   * @task: pointer to struct task_struct + * @group: shared or private + * @result: TRACE_SIGNAL_*   *   * Current process sends a 'sig' signal to 'task' process with   * 'info' siginfo. 
If 'info' is SEND_SIG_NOINFO or SEND_SIG_PRIV, @@ -37,9 +49,10 @@   */  TRACE_EVENT(signal_generate, -	TP_PROTO(int sig, struct siginfo *info, struct task_struct *task), +	TP_PROTO(int sig, struct siginfo *info, struct task_struct *task, +			int group, int result), -	TP_ARGS(sig, info, task), +	TP_ARGS(sig, info, task, group, result),  	TP_STRUCT__entry(  		__field(	int,	sig			) @@ -47,6 +60,8 @@ TRACE_EVENT(signal_generate,  		__field(	int,	code			)  		__array(	char,	comm,	TASK_COMM_LEN	)  		__field(	pid_t,	pid			) +		__field(	int,	group			) +		__field(	int,	result			)  	),  	TP_fast_assign( @@ -54,11 +69,14 @@ TRACE_EVENT(signal_generate,  		TP_STORE_SIGINFO(__entry, info);  		memcpy(__entry->comm, task->comm, TASK_COMM_LEN);  		__entry->pid	= task->pid; +		__entry->group	= group; +		__entry->result	= result;  	), -	TP_printk("sig=%d errno=%d code=%d comm=%s pid=%d", +	TP_printk("sig=%d errno=%d code=%d comm=%s pid=%d grp=%d res=%d",  		  __entry->sig, __entry->errno, __entry->code, -		  __entry->comm, __entry->pid) +		  __entry->comm, __entry->pid, __entry->group, +		  __entry->result)  );  /** @@ -101,65 +119,6 @@ TRACE_EVENT(signal_deliver,  		  __entry->sa_handler, __entry->sa_flags)  ); -DECLARE_EVENT_CLASS(signal_queue_overflow, - -	TP_PROTO(int sig, int group, struct siginfo *info), - -	TP_ARGS(sig, group, info), - -	TP_STRUCT__entry( -		__field(	int,	sig	) -		__field(	int,	group	) -		__field(	int,	errno	) -		__field(	int,	code	) -	), - -	TP_fast_assign( -		__entry->sig	= sig; -		__entry->group	= group; -		TP_STORE_SIGINFO(__entry, info); -	), - -	TP_printk("sig=%d group=%d errno=%d code=%d", -		  __entry->sig, __entry->group, __entry->errno, __entry->code) -); - -/** - * signal_overflow_fail - called when signal queue is overflow - * @sig: signal number - * @group: signal to process group or not (bool) - * @info: pointer to struct siginfo - * - * Kernel fails to generate 'sig' signal with 'info' siginfo, because - * siginfo queue is overflow, and the 
signal is dropped. - * 'group' is not 0 if the signal will be sent to a process group. - * 'sig' is always one of RT signals. - */ -DEFINE_EVENT(signal_queue_overflow, signal_overflow_fail, - -	TP_PROTO(int sig, int group, struct siginfo *info), - -	TP_ARGS(sig, group, info) -); - -/** - * signal_lose_info - called when siginfo is lost - * @sig: signal number - * @group: signal to process group or not (bool) - * @info: pointer to struct siginfo - * - * Kernel generates 'sig' signal but loses 'info' siginfo, because siginfo - * queue is overflow. - * 'group' is not 0 if the signal will be sent to a process group. - * 'sig' is always one of non-RT signals. - */ -DEFINE_EVENT(signal_queue_overflow, signal_lose_info, - -	TP_PROTO(int sig, int group, struct siginfo *info), - -	TP_ARGS(sig, group, info) -); -  #endif /* _TRACE_SIGNAL_H */  /* This part must be outside protection */ diff --git a/include/trace/events/skb.h b/include/trace/events/skb.h index 75ce9d500d8..0c68ae22da2 100644 --- a/include/trace/events/skb.h +++ b/include/trace/events/skb.h @@ -19,16 +19,14 @@ TRACE_EVENT(kfree_skb,  	TP_STRUCT__entry(  		__field(	void *,		skbaddr		) -		__field(	unsigned short,	protocol	)  		__field(	void *,		location	) +		__field(	unsigned short,	protocol	)  	),  	TP_fast_assign(  		__entry->skbaddr = skb; -		if (skb) { -			__entry->protocol = ntohs(skb->protocol); -		}  		__entry->location = location; +		__entry->protocol = ntohs(skb->protocol);  	),  	TP_printk("skbaddr=%p protocol=%u location=%p", diff --git a/include/trace/events/sock.h b/include/trace/events/sock.h new file mode 100644 index 00000000000..779abb91df8 --- /dev/null +++ b/include/trace/events/sock.h @@ -0,0 +1,68 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM sock + +#if !defined(_TRACE_SOCK_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_SOCK_H + +#include <net/sock.h> +#include <linux/tracepoint.h> + +TRACE_EVENT(sock_rcvqueue_full, + +	TP_PROTO(struct sock *sk, struct sk_buff *skb), + +	TP_ARGS(sk, 
skb), + +	TP_STRUCT__entry( +		__field(int, rmem_alloc) +		__field(unsigned int, truesize) +		__field(int, sk_rcvbuf) +	), + +	TP_fast_assign( +		__entry->rmem_alloc = atomic_read(&sk->sk_rmem_alloc); +		__entry->truesize   = skb->truesize; +		__entry->sk_rcvbuf  = sk->sk_rcvbuf; +	), + +	TP_printk("rmem_alloc=%d truesize=%u sk_rcvbuf=%d", +		__entry->rmem_alloc, __entry->truesize, __entry->sk_rcvbuf) +); + +TRACE_EVENT(sock_exceed_buf_limit, + +	TP_PROTO(struct sock *sk, struct proto *prot, long allocated), + +	TP_ARGS(sk, prot, allocated), + +	TP_STRUCT__entry( +		__array(char, name, 32) +		__field(long *, sysctl_mem) +		__field(long, allocated) +		__field(int, sysctl_rmem) +		__field(int, rmem_alloc) +	), + +	TP_fast_assign( +		strncpy(__entry->name, prot->name, 32); +		__entry->sysctl_mem = prot->sysctl_mem; +		__entry->allocated = allocated; +		__entry->sysctl_rmem = prot->sysctl_rmem[0]; +		__entry->rmem_alloc = atomic_read(&sk->sk_rmem_alloc); +	), + +	TP_printk("proto:%s sysctl_mem=%ld,%ld,%ld allocated=%ld " +		"sysctl_rmem=%d rmem_alloc=%d", +		__entry->name, +		__entry->sysctl_mem[0], +		__entry->sysctl_mem[1], +		__entry->sysctl_mem[2], +		__entry->allocated, +		__entry->sysctl_rmem, +		__entry->rmem_alloc) +); + +#endif /* _TRACE_SOCK_H */ + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/include/trace/events/spi.h b/include/trace/events/spi.h new file mode 100644 index 00000000000..7e02c983bbe --- /dev/null +++ b/include/trace/events/spi.h @@ -0,0 +1,156 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM spi + +#if !defined(_TRACE_SPI_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_SPI_H + +#include <linux/ktime.h> +#include <linux/tracepoint.h> + +DECLARE_EVENT_CLASS(spi_master, + +	TP_PROTO(struct spi_master *master), + +	TP_ARGS(master), + +	TP_STRUCT__entry( +		__field(        int,           bus_num             ) +	), + +	TP_fast_assign( +		__entry->bus_num = master->bus_num; +	), + +	TP_printk("spi%d", 
(int)__entry->bus_num) + +); + +DEFINE_EVENT(spi_master, spi_master_idle, + +	TP_PROTO(struct spi_master *master), + +	TP_ARGS(master) + +); + +DEFINE_EVENT(spi_master, spi_master_busy, + +	TP_PROTO(struct spi_master *master), + +	TP_ARGS(master) + +); + +DECLARE_EVENT_CLASS(spi_message, + +	TP_PROTO(struct spi_message *msg), + +	TP_ARGS(msg), + +	TP_STRUCT__entry( +		__field(        int,            bus_num         ) +		__field(        int,            chip_select     ) +		__field(        struct spi_message *,   msg     ) +	), + +	TP_fast_assign( +		__entry->bus_num = msg->spi->master->bus_num; +		__entry->chip_select = msg->spi->chip_select; +		__entry->msg = msg; +	), + +        TP_printk("spi%d.%d %p", (int)__entry->bus_num, +		  (int)__entry->chip_select, +		  (struct spi_message *)__entry->msg) +); + +DEFINE_EVENT(spi_message, spi_message_submit, + +	TP_PROTO(struct spi_message *msg), + +	TP_ARGS(msg) + +); + +DEFINE_EVENT(spi_message, spi_message_start, + +	TP_PROTO(struct spi_message *msg), + +	TP_ARGS(msg) + +); + +TRACE_EVENT(spi_message_done, + +	TP_PROTO(struct spi_message *msg), + +	TP_ARGS(msg), + +	TP_STRUCT__entry( +		__field(        int,            bus_num         ) +		__field(        int,            chip_select     ) +		__field(        struct spi_message *,   msg     ) +		__field(        unsigned,       frame           ) +		__field(        unsigned,       actual          ) +	), + +	TP_fast_assign( +		__entry->bus_num = msg->spi->master->bus_num; +		__entry->chip_select = msg->spi->chip_select; +		__entry->msg = msg; +		__entry->frame = msg->frame_length; +		__entry->actual = msg->actual_length; +	), + +        TP_printk("spi%d.%d %p len=%u/%u", (int)__entry->bus_num, +		  (int)__entry->chip_select, +		  (struct spi_message *)__entry->msg, +                  (unsigned)__entry->actual, (unsigned)__entry->frame) +); + +DECLARE_EVENT_CLASS(spi_transfer, + +	TP_PROTO(struct spi_message *msg, struct spi_transfer *xfer), + +	TP_ARGS(msg, xfer), + +	
TP_STRUCT__entry( +		__field(        int,            bus_num         ) +		__field(        int,            chip_select     ) +		__field(        struct spi_transfer *,   xfer   ) +		__field(        int,            len             ) +	), + +	TP_fast_assign( +		__entry->bus_num = msg->spi->master->bus_num; +		__entry->chip_select = msg->spi->chip_select; +		__entry->xfer = xfer; +		__entry->len = xfer->len; +	), + +        TP_printk("spi%d.%d %p len=%d", (int)__entry->bus_num, +		  (int)__entry->chip_select, +		  (struct spi_message *)__entry->xfer, +		  (int)__entry->len) +); + +DEFINE_EVENT(spi_transfer, spi_transfer_start, + +	TP_PROTO(struct spi_message *msg, struct spi_transfer *xfer), + +	TP_ARGS(msg, xfer) + +); + +DEFINE_EVENT(spi_transfer, spi_transfer_stop, + +	TP_PROTO(struct spi_message *msg, struct spi_transfer *xfer), + +	TP_ARGS(msg, xfer) + +); + +#endif /* _TRACE_POWER_H */ + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h new file mode 100644 index 00000000000..1fef3e6e943 --- /dev/null +++ b/include/trace/events/sunrpc.h @@ -0,0 +1,311 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM sunrpc + +#if !defined(_TRACE_SUNRPC_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_SUNRPC_H + +#include <linux/sunrpc/sched.h> +#include <linux/sunrpc/clnt.h> +#include <net/tcp_states.h> +#include <linux/net.h> +#include <linux/tracepoint.h> + +DECLARE_EVENT_CLASS(rpc_task_status, + +	TP_PROTO(struct rpc_task *task), + +	TP_ARGS(task), + +	TP_STRUCT__entry( +		__field(unsigned int, task_id) +		__field(unsigned int, client_id) +		__field(int, status) +	), + +	TP_fast_assign( +		__entry->task_id = task->tk_pid; +		__entry->client_id = task->tk_client->cl_clid; +		__entry->status = task->tk_status; +	), + +	TP_printk("task:%u@%u, status %d", +		__entry->task_id, __entry->client_id, +		__entry->status) +); + +DEFINE_EVENT(rpc_task_status, rpc_call_status, +	
TP_PROTO(struct rpc_task *task), + +	TP_ARGS(task) +); + +DEFINE_EVENT(rpc_task_status, rpc_bind_status, +	TP_PROTO(struct rpc_task *task), + +	TP_ARGS(task) +); + +TRACE_EVENT(rpc_connect_status, +	TP_PROTO(struct rpc_task *task, int status), + +	TP_ARGS(task, status), + +	TP_STRUCT__entry( +		__field(unsigned int, task_id) +		__field(unsigned int, client_id) +		__field(int, status) +	), + +	TP_fast_assign( +		__entry->task_id = task->tk_pid; +		__entry->client_id = task->tk_client->cl_clid; +		__entry->status = status; +	), + +	TP_printk("task:%u@%u, status %d", +		__entry->task_id, __entry->client_id, +		__entry->status) +); + +DECLARE_EVENT_CLASS(rpc_task_running, + +	TP_PROTO(const struct rpc_clnt *clnt, const struct rpc_task *task, const void *action), + +	TP_ARGS(clnt, task, action), + +	TP_STRUCT__entry( +		__field(unsigned int, task_id) +		__field(unsigned int, client_id) +		__field(const void *, action) +		__field(unsigned long, runstate) +		__field(int, status) +		__field(unsigned short, flags) +		), + +	TP_fast_assign( +		__entry->client_id = clnt ? 
clnt->cl_clid : -1; +		__entry->task_id = task->tk_pid; +		__entry->action = action; +		__entry->runstate = task->tk_runstate; +		__entry->status = task->tk_status; +		__entry->flags = task->tk_flags; +		), + +	TP_printk("task:%u@%d flags=%4.4x state=%4.4lx status=%d action=%pf", +		__entry->task_id, __entry->client_id, +		__entry->flags, +		__entry->runstate, +		__entry->status, +		__entry->action +		) +); + +DEFINE_EVENT(rpc_task_running, rpc_task_begin, + +	TP_PROTO(const struct rpc_clnt *clnt, const struct rpc_task *task, const void *action), + +	TP_ARGS(clnt, task, action) + +); + +DEFINE_EVENT(rpc_task_running, rpc_task_run_action, + +	TP_PROTO(const struct rpc_clnt *clnt, const struct rpc_task *task, const void *action), + +	TP_ARGS(clnt, task, action) + +); + +DEFINE_EVENT(rpc_task_running, rpc_task_complete, + +	TP_PROTO(const struct rpc_clnt *clnt, const struct rpc_task *task, const void *action), + +	TP_ARGS(clnt, task, action) + +); + +DECLARE_EVENT_CLASS(rpc_task_queued, + +	TP_PROTO(const struct rpc_clnt *clnt, const struct rpc_task *task, const struct rpc_wait_queue *q), + +	TP_ARGS(clnt, task, q), + +	TP_STRUCT__entry( +		__field(unsigned int, task_id) +		__field(unsigned int, client_id) +		__field(unsigned long, timeout) +		__field(unsigned long, runstate) +		__field(int, status) +		__field(unsigned short, flags) +		__string(q_name, rpc_qname(q)) +		), + +	TP_fast_assign( +		__entry->client_id = clnt->cl_clid; +		__entry->task_id = task->tk_pid; +		__entry->timeout = task->tk_timeout; +		__entry->runstate = task->tk_runstate; +		__entry->status = task->tk_status; +		__entry->flags = task->tk_flags; +		__assign_str(q_name, rpc_qname(q)); +		), + +	TP_printk("task:%u@%u flags=%4.4x state=%4.4lx status=%d timeout=%lu queue=%s", +		__entry->task_id, __entry->client_id, +		__entry->flags, +		__entry->runstate, +		__entry->status, +		__entry->timeout, +		__get_str(q_name) +		) +); + +DEFINE_EVENT(rpc_task_queued, rpc_task_sleep, + +	TP_PROTO(const struct 
rpc_clnt *clnt, const struct rpc_task *task, const struct rpc_wait_queue *q), + +	TP_ARGS(clnt, task, q) + +); + +DEFINE_EVENT(rpc_task_queued, rpc_task_wakeup, + +	TP_PROTO(const struct rpc_clnt *clnt, const struct rpc_task *task, const struct rpc_wait_queue *q), + +	TP_ARGS(clnt, task, q) + +); + +#define rpc_show_socket_state(state) \ +	__print_symbolic(state, \ +		{ SS_FREE, "FREE" }, \ +		{ SS_UNCONNECTED, "UNCONNECTED" }, \ +		{ SS_CONNECTING, "CONNECTING," }, \ +		{ SS_CONNECTED, "CONNECTED," }, \ +		{ SS_DISCONNECTING, "DISCONNECTING" }) + +#define rpc_show_sock_state(state) \ +	__print_symbolic(state, \ +		{ TCP_ESTABLISHED, "ESTABLISHED" }, \ +		{ TCP_SYN_SENT, "SYN_SENT" }, \ +		{ TCP_SYN_RECV, "SYN_RECV" }, \ +		{ TCP_FIN_WAIT1, "FIN_WAIT1" }, \ +		{ TCP_FIN_WAIT2, "FIN_WAIT2" }, \ +		{ TCP_TIME_WAIT, "TIME_WAIT" }, \ +		{ TCP_CLOSE, "CLOSE" }, \ +		{ TCP_CLOSE_WAIT, "CLOSE_WAIT" }, \ +		{ TCP_LAST_ACK, "LAST_ACK" }, \ +		{ TCP_LISTEN, "LISTEN" }, \ +		{ TCP_CLOSING, "CLOSING" }) + +DECLARE_EVENT_CLASS(xs_socket_event, + +		TP_PROTO( +			struct rpc_xprt *xprt, +			struct socket *socket +		), + +		TP_ARGS(xprt, socket), + +		TP_STRUCT__entry( +			__field(unsigned int, socket_state) +			__field(unsigned int, sock_state) +			__field(unsigned long long, ino) +			__string(dstaddr, +				xprt->address_strings[RPC_DISPLAY_ADDR]) +			__string(dstport, +				xprt->address_strings[RPC_DISPLAY_PORT]) +		), + +		TP_fast_assign( +			struct inode *inode = SOCK_INODE(socket); +			__entry->socket_state = socket->state; +			__entry->sock_state = socket->sk->sk_state; +			__entry->ino = (unsigned long long)inode->i_ino; +			__assign_str(dstaddr, +				xprt->address_strings[RPC_DISPLAY_ADDR]); +			__assign_str(dstport, +				xprt->address_strings[RPC_DISPLAY_PORT]); +		), + +		TP_printk( +			"socket:[%llu] dstaddr=%s/%s " +			"state=%u (%s) sk_state=%u (%s)", +			__entry->ino, __get_str(dstaddr), __get_str(dstport), +			__entry->socket_state, +			
rpc_show_socket_state(__entry->socket_state), +			__entry->sock_state, +			rpc_show_sock_state(__entry->sock_state) +		) +); +#define DEFINE_RPC_SOCKET_EVENT(name) \ +	DEFINE_EVENT(xs_socket_event, name, \ +			TP_PROTO( \ +				struct rpc_xprt *xprt, \ +				struct socket *socket \ +			), \ +			TP_ARGS(xprt, socket)) + +DECLARE_EVENT_CLASS(xs_socket_event_done, + +		TP_PROTO( +			struct rpc_xprt *xprt, +			struct socket *socket, +			int error +		), + +		TP_ARGS(xprt, socket, error), + +		TP_STRUCT__entry( +			__field(int, error) +			__field(unsigned int, socket_state) +			__field(unsigned int, sock_state) +			__field(unsigned long long, ino) +			__string(dstaddr, +				xprt->address_strings[RPC_DISPLAY_ADDR]) +			__string(dstport, +				xprt->address_strings[RPC_DISPLAY_PORT]) +		), + +		TP_fast_assign( +			struct inode *inode = SOCK_INODE(socket); +			__entry->socket_state = socket->state; +			__entry->sock_state = socket->sk->sk_state; +			__entry->ino = (unsigned long long)inode->i_ino; +			__entry->error = error; +			__assign_str(dstaddr, +				xprt->address_strings[RPC_DISPLAY_ADDR]); +			__assign_str(dstport, +				xprt->address_strings[RPC_DISPLAY_PORT]); +		), + +		TP_printk( +			"error=%d socket:[%llu] dstaddr=%s/%s " +			"state=%u (%s) sk_state=%u (%s)", +			__entry->error, +			__entry->ino, __get_str(dstaddr), __get_str(dstport), +			__entry->socket_state, +			rpc_show_socket_state(__entry->socket_state), +			__entry->sock_state, +			rpc_show_sock_state(__entry->sock_state) +		) +); +#define DEFINE_RPC_SOCKET_EVENT_DONE(name) \ +	DEFINE_EVENT(xs_socket_event_done, name, \ +			TP_PROTO( \ +				struct rpc_xprt *xprt, \ +				struct socket *socket, \ +				int error \ +			), \ +			TP_ARGS(xprt, socket, error)) + +DEFINE_RPC_SOCKET_EVENT(rpc_socket_state_change); +DEFINE_RPC_SOCKET_EVENT_DONE(rpc_socket_connect); +DEFINE_RPC_SOCKET_EVENT_DONE(rpc_socket_error); +DEFINE_RPC_SOCKET_EVENT_DONE(rpc_socket_reset_connection); +DEFINE_RPC_SOCKET_EVENT(rpc_socket_close); 
+DEFINE_RPC_SOCKET_EVENT(rpc_socket_shutdown); + +#endif /* _TRACE_SUNRPC_H */ + +#include <trace/define_trace.h> diff --git a/include/trace/events/swiotlb.h b/include/trace/events/swiotlb.h new file mode 100644 index 00000000000..7ea4c5e7c44 --- /dev/null +++ b/include/trace/events/swiotlb.h @@ -0,0 +1,46 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM swiotlb + +#if !defined(_TRACE_SWIOTLB_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_SWIOTLB_H + +#include <linux/tracepoint.h> + +TRACE_EVENT(swiotlb_bounced, + +	TP_PROTO(struct device *dev, +		 dma_addr_t dev_addr, +		 size_t size, +		 int swiotlb_force), + +	TP_ARGS(dev, dev_addr, size, swiotlb_force), + +	TP_STRUCT__entry( +		__string(	dev_name,	dev_name(dev)	) +		__field(	u64,	dma_mask		) +		__field(	dma_addr_t,	dev_addr	) +		__field(	size_t,	size			) +		__field(	int,	swiotlb_force		) +	), + +	TP_fast_assign( +		__assign_str(dev_name, dev_name(dev)); +		__entry->dma_mask = (dev->dma_mask ? *dev->dma_mask : 0); +		__entry->dev_addr = dev_addr; +		__entry->size = size; +		__entry->swiotlb_force = swiotlb_force; +	), + +	TP_printk("dev_name: %s dma_mask=%llx dev_addr=%llx " +		"size=%zu %s", +		__get_str(dev_name), +		__entry->dma_mask, +		(unsigned long long)__entry->dev_addr, +		__entry->size, +		__entry->swiotlb_force ? 
"swiotlb_force" : "" ) +); + +#endif /*  _TRACE_SWIOTLB_H */ + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/include/trace/events/syscalls.h b/include/trace/events/syscalls.h index fb726ac7cae..14e49c79813 100644 --- a/include/trace/events/syscalls.h +++ b/include/trace/events/syscalls.h @@ -13,9 +13,6 @@  #ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS -extern void syscall_regfunc(void); -extern void syscall_unregfunc(void); -  TRACE_EVENT_FN(sys_enter,  	TP_PROTO(struct pt_regs *regs, long id), @@ -40,6 +37,8 @@ TRACE_EVENT_FN(sys_enter,  	syscall_regfunc, syscall_unregfunc  ); +TRACE_EVENT_FLAGS(sys_enter, TRACE_EVENT_FL_CAP_ANY) +  TRACE_EVENT_FN(sys_exit,  	TP_PROTO(struct pt_regs *regs, long ret), @@ -62,6 +61,8 @@ TRACE_EVENT_FN(sys_exit,  	syscall_regfunc, syscall_unregfunc  ); +TRACE_EVENT_FLAGS(sys_exit, TRACE_EVENT_FL_CAP_ANY) +  #endif /* CONFIG_HAVE_SYSCALL_TRACEPOINTS */  #endif /* _TRACE_EVENTS_SYSCALLS_H */ diff --git a/include/trace/events/target.h b/include/trace/events/target.h new file mode 100644 index 00000000000..da9cc0f05c9 --- /dev/null +++ b/include/trace/events/target.h @@ -0,0 +1,214 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM target + +#if !defined(_TRACE_TARGET_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_TARGET_H + +#include <linux/tracepoint.h> +#include <linux/trace_seq.h> +#include <scsi/scsi.h> +#include <scsi/scsi_tcq.h> +#include <target/target_core_base.h> + +/* cribbed verbatim from <trace/event/scsi.h> */ +#define scsi_opcode_name(opcode)	{ opcode, #opcode } +#define show_opcode_name(val)					\ +	__print_symbolic(val,					\ +		scsi_opcode_name(TEST_UNIT_READY),		\ +		scsi_opcode_name(REZERO_UNIT),			\ +		scsi_opcode_name(REQUEST_SENSE),		\ +		scsi_opcode_name(FORMAT_UNIT),			\ +		scsi_opcode_name(READ_BLOCK_LIMITS),		\ +		scsi_opcode_name(REASSIGN_BLOCKS),		\ +		scsi_opcode_name(INITIALIZE_ELEMENT_STATUS),	\ +		scsi_opcode_name(READ_6),			\ +		scsi_opcode_name(WRITE_6),			\ +		
scsi_opcode_name(SEEK_6),			\ +		scsi_opcode_name(READ_REVERSE),			\ +		scsi_opcode_name(WRITE_FILEMARKS),		\ +		scsi_opcode_name(SPACE),			\ +		scsi_opcode_name(INQUIRY),			\ +		scsi_opcode_name(RECOVER_BUFFERED_DATA),	\ +		scsi_opcode_name(MODE_SELECT),			\ +		scsi_opcode_name(RESERVE),			\ +		scsi_opcode_name(RELEASE),			\ +		scsi_opcode_name(COPY),				\ +		scsi_opcode_name(ERASE),			\ +		scsi_opcode_name(MODE_SENSE),			\ +		scsi_opcode_name(START_STOP),			\ +		scsi_opcode_name(RECEIVE_DIAGNOSTIC),		\ +		scsi_opcode_name(SEND_DIAGNOSTIC),		\ +		scsi_opcode_name(ALLOW_MEDIUM_REMOVAL),		\ +		scsi_opcode_name(SET_WINDOW),			\ +		scsi_opcode_name(READ_CAPACITY),		\ +		scsi_opcode_name(READ_10),			\ +		scsi_opcode_name(WRITE_10),			\ +		scsi_opcode_name(SEEK_10),			\ +		scsi_opcode_name(POSITION_TO_ELEMENT),		\ +		scsi_opcode_name(WRITE_VERIFY),			\ +		scsi_opcode_name(VERIFY),			\ +		scsi_opcode_name(SEARCH_HIGH),			\ +		scsi_opcode_name(SEARCH_EQUAL),			\ +		scsi_opcode_name(SEARCH_LOW),			\ +		scsi_opcode_name(SET_LIMITS),			\ +		scsi_opcode_name(PRE_FETCH),			\ +		scsi_opcode_name(READ_POSITION),		\ +		scsi_opcode_name(SYNCHRONIZE_CACHE),		\ +		scsi_opcode_name(LOCK_UNLOCK_CACHE),		\ +		scsi_opcode_name(READ_DEFECT_DATA),		\ +		scsi_opcode_name(MEDIUM_SCAN),			\ +		scsi_opcode_name(COMPARE),			\ +		scsi_opcode_name(COPY_VERIFY),			\ +		scsi_opcode_name(WRITE_BUFFER),			\ +		scsi_opcode_name(READ_BUFFER),			\ +		scsi_opcode_name(UPDATE_BLOCK),			\ +		scsi_opcode_name(READ_LONG),			\ +		scsi_opcode_name(WRITE_LONG),			\ +		scsi_opcode_name(CHANGE_DEFINITION),		\ +		scsi_opcode_name(WRITE_SAME),			\ +		scsi_opcode_name(UNMAP),			\ +		scsi_opcode_name(READ_TOC),			\ +		scsi_opcode_name(LOG_SELECT),			\ +		scsi_opcode_name(LOG_SENSE),			\ +		scsi_opcode_name(XDWRITEREAD_10),		\ +		scsi_opcode_name(MODE_SELECT_10),		\ +		scsi_opcode_name(RESERVE_10),			\ +		scsi_opcode_name(RELEASE_10),			\ +		scsi_opcode_name(MODE_SENSE_10),		\ +		
scsi_opcode_name(PERSISTENT_RESERVE_IN),	\ +		scsi_opcode_name(PERSISTENT_RESERVE_OUT),	\ +		scsi_opcode_name(VARIABLE_LENGTH_CMD),		\ +		scsi_opcode_name(REPORT_LUNS),			\ +		scsi_opcode_name(MAINTENANCE_IN),		\ +		scsi_opcode_name(MAINTENANCE_OUT),		\ +		scsi_opcode_name(MOVE_MEDIUM),			\ +		scsi_opcode_name(EXCHANGE_MEDIUM),		\ +		scsi_opcode_name(READ_12),			\ +		scsi_opcode_name(WRITE_12),			\ +		scsi_opcode_name(WRITE_VERIFY_12),		\ +		scsi_opcode_name(SEARCH_HIGH_12),		\ +		scsi_opcode_name(SEARCH_EQUAL_12),		\ +		scsi_opcode_name(SEARCH_LOW_12),		\ +		scsi_opcode_name(READ_ELEMENT_STATUS),		\ +		scsi_opcode_name(SEND_VOLUME_TAG),		\ +		scsi_opcode_name(WRITE_LONG_2),			\ +		scsi_opcode_name(READ_16),			\ +		scsi_opcode_name(WRITE_16),			\ +		scsi_opcode_name(VERIFY_16),			\ +		scsi_opcode_name(WRITE_SAME_16),		\ +		scsi_opcode_name(SERVICE_ACTION_IN),		\ +		scsi_opcode_name(SAI_READ_CAPACITY_16),		\ +		scsi_opcode_name(SAI_GET_LBA_STATUS),		\ +		scsi_opcode_name(MI_REPORT_TARGET_PGS),		\ +		scsi_opcode_name(MO_SET_TARGET_PGS),		\ +		scsi_opcode_name(READ_32),			\ +		scsi_opcode_name(WRITE_32),			\ +		scsi_opcode_name(WRITE_SAME_32),		\ +		scsi_opcode_name(ATA_16),			\ +		scsi_opcode_name(ATA_12)) + +#define show_task_attribute_name(val)				\ +	__print_symbolic(val,					\ +		{ MSG_SIMPLE_TAG,	"SIMPLE"	},	\ +		{ MSG_HEAD_TAG,		"HEAD"		},	\ +		{ MSG_ORDERED_TAG,	"ORDERED"	},	\ +		{ MSG_ACA_TAG,		"ACA"		} ) + +#define show_scsi_status_name(val)				\ +	__print_symbolic(val,					\ +		{ SAM_STAT_GOOD,	"GOOD" },		\ +		{ SAM_STAT_CHECK_CONDITION, "CHECK CONDITION" }, \ +		{ SAM_STAT_CONDITION_MET, "CONDITION MET" },	\ +		{ SAM_STAT_BUSY,	"BUSY" },		\ +		{ SAM_STAT_INTERMEDIATE, "INTERMEDIATE" },	\ +		{ SAM_STAT_INTERMEDIATE_CONDITION_MET, "INTERMEDIATE CONDITION MET" }, \ +		{ SAM_STAT_RESERVATION_CONFLICT, "RESERVATION CONFLICT" }, \ +		{ SAM_STAT_COMMAND_TERMINATED, "COMMAND TERMINATED" }, \ +		{ SAM_STAT_TASK_SET_FULL, "TASK SET FULL" },	\ +		{ 
SAM_STAT_ACA_ACTIVE, "ACA ACTIVE" },		\ +		{ SAM_STAT_TASK_ABORTED, "TASK ABORTED" } ) + +TRACE_EVENT(target_sequencer_start, + +	TP_PROTO(struct se_cmd *cmd), + +	TP_ARGS(cmd), + +	TP_STRUCT__entry( +		__field( unsigned int,	unpacked_lun	) +		__field( unsigned int,	opcode		) +		__field( unsigned int,	data_length	) +		__field( unsigned int,	task_attribute  ) +		__array( unsigned char,	cdb, TCM_MAX_COMMAND_SIZE	) +		__string( initiator,	cmd->se_sess->se_node_acl->initiatorname	) +	), + +	TP_fast_assign( +		__entry->unpacked_lun	= cmd->orig_fe_lun; +		__entry->opcode		= cmd->t_task_cdb[0]; +		__entry->data_length	= cmd->data_length; +		__entry->task_attribute	= cmd->sam_task_attr; +		memcpy(__entry->cdb, cmd->t_task_cdb, TCM_MAX_COMMAND_SIZE); +		__assign_str(initiator, cmd->se_sess->se_node_acl->initiatorname); +	), + +	TP_printk("%s -> LUN %03u %s data_length %6u  CDB %s  (TA:%s C:%02x)", +		  __get_str(initiator), __entry->unpacked_lun, +		  show_opcode_name(__entry->opcode), +		  __entry->data_length, __print_hex(__entry->cdb, 16), +		  show_task_attribute_name(__entry->task_attribute), +		  scsi_command_size(__entry->cdb) <= 16 ? 
+			__entry->cdb[scsi_command_size(__entry->cdb) - 1] : +			__entry->cdb[1] +	) +); + +TRACE_EVENT(target_cmd_complete, + +	TP_PROTO(struct se_cmd *cmd), + +	TP_ARGS(cmd), + +	TP_STRUCT__entry( +		__field( unsigned int,	unpacked_lun	) +		__field( unsigned int,	opcode		) +		__field( unsigned int,	data_length	) +		__field( unsigned int,	task_attribute  ) +		__field( unsigned char,	scsi_status	) +		__field( unsigned char,	sense_length	) +		__array( unsigned char,	cdb, TCM_MAX_COMMAND_SIZE	) +		__array( unsigned char,	sense_data, 18	) +		__string(initiator,	cmd->se_sess->se_node_acl->initiatorname) +	), + +	TP_fast_assign( +		__entry->unpacked_lun	= cmd->orig_fe_lun; +		__entry->opcode		= cmd->t_task_cdb[0]; +		__entry->data_length	= cmd->data_length; +		__entry->task_attribute	= cmd->sam_task_attr; +		__entry->scsi_status	= cmd->scsi_status; +		__entry->sense_length	= cmd->scsi_status == SAM_STAT_CHECK_CONDITION ? +			min(18, ((u8 *) cmd->sense_buffer)[SPC_ADD_SENSE_LEN_OFFSET] + 8) : 0; +		memcpy(__entry->cdb, cmd->t_task_cdb, TCM_MAX_COMMAND_SIZE); +		memcpy(__entry->sense_data, cmd->sense_buffer, __entry->sense_length); +		__assign_str(initiator, cmd->se_sess->se_node_acl->initiatorname); +	), + +	TP_printk("%s <- LUN %03u status %s (sense len %d%s%s)  %s data_length %6u  CDB %s  (TA:%s C:%02x)", +		  __get_str(initiator), __entry->unpacked_lun, +		  show_scsi_status_name(__entry->scsi_status), +		  __entry->sense_length, __entry->sense_length ? " / " : "", +		  __print_hex(__entry->sense_data, __entry->sense_length), +		  show_opcode_name(__entry->opcode), +		  __entry->data_length, __print_hex(__entry->cdb, 16), +		  show_task_attribute_name(__entry->task_attribute), +		  scsi_command_size(__entry->cdb) <= 16 ? 
+			__entry->cdb[scsi_command_size(__entry->cdb) - 1] : +			__entry->cdb[1] +	) +); + +#endif /*  _TRACE_TARGET_H */ + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/include/trace/events/task.h b/include/trace/events/task.h new file mode 100644 index 00000000000..dee3bb1d5a6 --- /dev/null +++ b/include/trace/events/task.h @@ -0,0 +1,61 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM task + +#if !defined(_TRACE_TASK_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_TASK_H +#include <linux/tracepoint.h> + +TRACE_EVENT(task_newtask, + +	TP_PROTO(struct task_struct *task, unsigned long clone_flags), + +	TP_ARGS(task, clone_flags), + +	TP_STRUCT__entry( +		__field(	pid_t,	pid) +		__array(	char,	comm, TASK_COMM_LEN) +		__field( unsigned long, clone_flags) +		__field(	short,	oom_score_adj) +	), + +	TP_fast_assign( +		__entry->pid = task->pid; +		memcpy(__entry->comm, task->comm, TASK_COMM_LEN); +		__entry->clone_flags = clone_flags; +		__entry->oom_score_adj = task->signal->oom_score_adj; +	), + +	TP_printk("pid=%d comm=%s clone_flags=%lx oom_score_adj=%hd", +		__entry->pid, __entry->comm, +		__entry->clone_flags, __entry->oom_score_adj) +); + +TRACE_EVENT(task_rename, + +	TP_PROTO(struct task_struct *task, const char *comm), + +	TP_ARGS(task, comm), + +	TP_STRUCT__entry( +		__field(	pid_t,	pid) +		__array(	char, oldcomm,  TASK_COMM_LEN) +		__array(	char, newcomm,  TASK_COMM_LEN) +		__field(	short,	oom_score_adj) +	), + +	TP_fast_assign( +		__entry->pid = task->pid; +		memcpy(entry->oldcomm, task->comm, TASK_COMM_LEN); +		memcpy(entry->newcomm, comm, TASK_COMM_LEN); +		__entry->oom_score_adj = task->signal->oom_score_adj; +	), + +	TP_printk("pid=%d oldcomm=%s newcomm=%s oom_score_adj=%hd", +		__entry->pid, __entry->oldcomm, +		__entry->newcomm, __entry->oom_score_adj) +); + +#endif + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/include/trace/events/timer.h 
b/include/trace/events/timer.h index 425bcfe56c6..68c2c2000f0 100644 --- a/include/trace/events/timer.h +++ b/include/trace/events/timer.h @@ -123,7 +123,7 @@ DEFINE_EVENT(timer_class, timer_cancel,  /**   * hrtimer_init - called when the hrtimer is initialized - * @timer:	pointer to struct hrtimer + * @hrtimer:	pointer to struct hrtimer   * @clockid:	the hrtimers clock   * @mode:	the hrtimers mode   */ @@ -155,7 +155,7 @@ TRACE_EVENT(hrtimer_init,  /**   * hrtimer_start - called when the hrtimer is started - * @timer: pointer to struct hrtimer + * @hrtimer: pointer to struct hrtimer   */  TRACE_EVENT(hrtimer_start, @@ -186,8 +186,8 @@ TRACE_EVENT(hrtimer_start,  );  /** - * htimmer_expire_entry - called immediately before the hrtimer callback - * @timer:	pointer to struct hrtimer + * hrtimer_expire_entry - called immediately before the hrtimer callback + * @hrtimer:	pointer to struct hrtimer   * @now:	pointer to variable which contains current time of the   *		timers base.   * @@ -234,7 +234,7 @@ DECLARE_EVENT_CLASS(hrtimer_class,  /**   * hrtimer_expire_exit - called immediately after the hrtimer callback returns - * @timer:	pointer to struct hrtimer + * @hrtimer:	pointer to struct hrtimer   *   * When used in combination with the hrtimer_expire_entry tracepoint we can   * determine the runtime of the callback function. @@ -323,6 +323,27 @@ TRACE_EVENT(itimer_expire,  		  (int) __entry->pid, (unsigned long long)__entry->now)  ); +#ifdef CONFIG_NO_HZ_COMMON +TRACE_EVENT(tick_stop, + +	TP_PROTO(int success, char *error_msg), + +	TP_ARGS(success, error_msg), + +	TP_STRUCT__entry( +		__field( int ,		success	) +		__string( msg, 		error_msg ) +	), + +	TP_fast_assign( +		__entry->success	= success; +		__assign_str(msg, error_msg); +	), + +	TP_printk("success=%s msg=%s",  __entry->success ? 
"yes" : "no", __get_str(msg)) +); +#endif +  #endif /*  _TRACE_TIMER_H */  /* This part must be outside protection */ diff --git a/include/trace/events/udp.h b/include/trace/events/udp.h new file mode 100644 index 00000000000..a664bb94097 --- /dev/null +++ b/include/trace/events/udp.h @@ -0,0 +1,32 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM udp + +#if !defined(_TRACE_UDP_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_UDP_H + +#include <linux/udp.h> +#include <linux/tracepoint.h> + +TRACE_EVENT(udp_fail_queue_rcv_skb, + +	TP_PROTO(int rc, struct sock *sk), + +	TP_ARGS(rc, sk), + +	TP_STRUCT__entry( +		__field(int, rc) +		__field(__u16, lport) +	), + +	TP_fast_assign( +		__entry->rc = rc; +		__entry->lport = inet_sk(sk)->inet_num; +	), + +	TP_printk("rc=%d port=%hu", __entry->rc, __entry->lport) +); + +#endif /* _TRACE_UDP_H */ + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/include/trace/events/v4l2.h b/include/trace/events/v4l2.h new file mode 100644 index 00000000000..b9bb1f20469 --- /dev/null +++ b/include/trace/events/v4l2.h @@ -0,0 +1,158 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM v4l2 + +#if !defined(_TRACE_V4L2_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_V4L2_H + +#include <linux/tracepoint.h> + +#define show_type(type)							       \ +	__print_symbolic(type,						       \ +		{ V4L2_BUF_TYPE_VIDEO_CAPTURE,	      "VIDEO_CAPTURE" },       \ +		{ V4L2_BUF_TYPE_VIDEO_OUTPUT,	      "VIDEO_OUTPUT" },	       \ +		{ V4L2_BUF_TYPE_VIDEO_OVERLAY,	      "VIDEO_OVERLAY" },       \ +		{ V4L2_BUF_TYPE_VBI_CAPTURE,	      "VBI_CAPTURE" },	       \ +		{ V4L2_BUF_TYPE_VBI_OUTPUT,	      "VBI_OUTPUT" },	       \ +		{ V4L2_BUF_TYPE_SLICED_VBI_CAPTURE,   "SLICED_VBI_CAPTURE" },  \ +		{ V4L2_BUF_TYPE_SLICED_VBI_OUTPUT,    "SLICED_VBI_OUTPUT" },   \ +		{ V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY, "VIDEO_OUTPUT_OVERLAY" },\ +		{ V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE, "VIDEO_CAPTURE_MPLANE" },\ +		{ 
V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE,  "VIDEO_OUTPUT_MPLANE" }, \ +		{ V4L2_BUF_TYPE_SDR_CAPTURE,          "SDR_CAPTURE" },         \ +		{ V4L2_BUF_TYPE_PRIVATE,	      "PRIVATE" }) + +#define show_field(field)						\ +	__print_symbolic(field,						\ +		{ V4L2_FIELD_ANY,		"ANY" },		\ +		{ V4L2_FIELD_NONE,		"NONE" },		\ +		{ V4L2_FIELD_TOP,		"TOP" },		\ +		{ V4L2_FIELD_BOTTOM,		"BOTTOM" },		\ +		{ V4L2_FIELD_INTERLACED,	"INTERLACED" },		\ +		{ V4L2_FIELD_SEQ_TB,		"SEQ_TB" },		\ +		{ V4L2_FIELD_SEQ_BT,		"SEQ_BT" },		\ +		{ V4L2_FIELD_ALTERNATE,		"ALTERNATE" },		\ +		{ V4L2_FIELD_INTERLACED_TB,	"INTERLACED_TB" },      \ +		{ V4L2_FIELD_INTERLACED_BT,	"INTERLACED_BT" }) + +#define show_timecode_type(type)					\ +	__print_symbolic(type,						\ +		{ V4L2_TC_TYPE_24FPS,		"24FPS" },		\ +		{ V4L2_TC_TYPE_25FPS,		"25FPS" },		\ +		{ V4L2_TC_TYPE_30FPS,		"30FPS" },		\ +		{ V4L2_TC_TYPE_50FPS,		"50FPS" },		\ +		{ V4L2_TC_TYPE_60FPS,		"60FPS" }) + +#define show_flags(flags)						      \ +	__print_flags(flags, "|",					      \ +		{ V4L2_BUF_FLAG_MAPPED,		     "MAPPED" },	      \ +		{ V4L2_BUF_FLAG_QUEUED,		     "QUEUED" },	      \ +		{ V4L2_BUF_FLAG_DONE,		     "DONE" },		      \ +		{ V4L2_BUF_FLAG_KEYFRAME,	     "KEYFRAME" },	      \ +		{ V4L2_BUF_FLAG_PFRAME,		     "PFRAME" },	      \ +		{ V4L2_BUF_FLAG_BFRAME,		     "BFRAME" },	      \ +		{ V4L2_BUF_FLAG_ERROR,		     "ERROR" },		      \ +		{ V4L2_BUF_FLAG_TIMECODE,	     "TIMECODE" },	      \ +		{ V4L2_BUF_FLAG_PREPARED,	     "PREPARED" },	      \ +		{ V4L2_BUF_FLAG_NO_CACHE_INVALIDATE, "NO_CACHE_INVALIDATE" }, \ +		{ V4L2_BUF_FLAG_NO_CACHE_CLEAN,	     "NO_CACHE_CLEAN" },      \ +		{ V4L2_BUF_FLAG_TIMESTAMP_MASK,	     "TIMESTAMP_MASK" },      \ +		{ V4L2_BUF_FLAG_TIMESTAMP_UNKNOWN,   "TIMESTAMP_UNKNOWN" },   \ +		{ V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC, "TIMESTAMP_MONOTONIC" }, \ +		{ V4L2_BUF_FLAG_TIMESTAMP_COPY,	     "TIMESTAMP_COPY" }) + +#define show_timecode_flags(flags)					  \ +	__print_flags(flags, "|",					  \ +		{ 
V4L2_TC_FLAG_DROPFRAME,       "DROPFRAME" },		  \ +		{ V4L2_TC_FLAG_COLORFRAME,      "COLORFRAME" },		  \ +		{ V4L2_TC_USERBITS_USERDEFINED,	"USERBITS_USERDEFINED" }, \ +		{ V4L2_TC_USERBITS_8BITCHARS,	"USERBITS_8BITCHARS" }) + +#define V4L2_TRACE_EVENT(event_name)					\ +	TRACE_EVENT(event_name,						\ +		TP_PROTO(int minor, struct v4l2_buffer *buf),		\ +									\ +		TP_ARGS(minor, buf),					\ +									\ +		TP_STRUCT__entry(					\ +			__field(int, minor)				\ +			__field(u32, index)				\ +			__field(u32, type)				\ +			__field(u32, bytesused)				\ +			__field(u32, flags)				\ +			__field(u32, field)				\ +			__field(s64, timestamp)				\ +			__field(u32, timecode_type)			\ +			__field(u32, timecode_flags)			\ +			__field(u8, timecode_frames)			\ +			__field(u8, timecode_seconds)			\ +			__field(u8, timecode_minutes)			\ +			__field(u8, timecode_hours)			\ +			__field(u8, timecode_userbits0)			\ +			__field(u8, timecode_userbits1)			\ +			__field(u8, timecode_userbits2)			\ +			__field(u8, timecode_userbits3)			\ +			__field(u32, sequence)				\ +		),							\ +									\ +		TP_fast_assign(						\ +			__entry->minor = minor;				\ +			__entry->index = buf->index;			\ +			__entry->type = buf->type;			\ +			__entry->bytesused = buf->bytesused;		\ +			__entry->flags = buf->flags;			\ +			__entry->field = buf->field;			\ +			__entry->timestamp =				\ +				timeval_to_ns(&buf->timestamp);		\ +			__entry->timecode_type = buf->timecode.type;	\ +			__entry->timecode_flags = buf->timecode.flags;	\ +			__entry->timecode_frames =			\ +				buf->timecode.frames;			\ +			__entry->timecode_seconds =			\ +				buf->timecode.seconds;			\ +			__entry->timecode_minutes =			\ +				buf->timecode.minutes;			\ +			__entry->timecode_hours = buf->timecode.hours;	\ +			__entry->timecode_userbits0 =			\ +				buf->timecode.userbits[0];		\ +			__entry->timecode_userbits1 =			\ +				buf->timecode.userbits[1];		\ +			__entry->timecode_userbits2 =			\ +				buf->timecode.userbits[2];		\ +			
__entry->timecode_userbits3 =			\ +				buf->timecode.userbits[3];		\ +			__entry->sequence = buf->sequence;		\ +		),							\ +									\ +		TP_printk("minor = %d, index = %u, type = %s, "		\ +			  "bytesused = %u, flags = %s, "		\ +			  "field = %s, timestamp = %llu, timecode = { "	\ +			  "type = %s, flags = %s, frames = %u, "	\ +			  "seconds = %u, minutes = %u, hours = %u, "	\ +			  "userbits = { %u %u %u %u } }, "		\ +			  "sequence = %u", __entry->minor,		\ +			  __entry->index, show_type(__entry->type),	\ +			  __entry->bytesused,				\ +			  show_flags(__entry->flags),			\ +			  show_field(__entry->field),			\ +			  __entry->timestamp,				\ +			  show_timecode_type(__entry->timecode_type),	\ +			  show_timecode_flags(__entry->timecode_flags),	\ +			  __entry->timecode_frames,			\ +			  __entry->timecode_seconds,			\ +			  __entry->timecode_minutes,			\ +			  __entry->timecode_hours,			\ +			  __entry->timecode_userbits0,			\ +			  __entry->timecode_userbits1,			\ +			  __entry->timecode_userbits2,			\ +			  __entry->timecode_userbits3,			\ +			  __entry->sequence				\ +		)							\ +	) + +V4L2_TRACE_EVENT(v4l2_dqbuf); +V4L2_TRACE_EVENT(v4l2_qbuf); + +#endif /* if !defined(_TRACE_V4L2_H) || defined(TRACE_HEADER_MULTI_READ) */ + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/include/trace/events/vmscan.h b/include/trace/events/vmscan.h index c255fcc587b..69590b6ffc0 100644 --- a/include/trace/events/vmscan.h +++ b/include/trace/events/vmscan.h @@ -6,12 +6,14 @@  #include <linux/types.h>  #include <linux/tracepoint.h> -#include "gfpflags.h" +#include <linux/mm.h> +#include <linux/memcontrol.h> +#include <trace/events/gfpflags.h>  #define RECLAIM_WB_ANON		0x0001u  #define RECLAIM_WB_FILE		0x0002u  #define RECLAIM_WB_MIXED	0x0010u -#define RECLAIM_WB_SYNC		0x0004u +#define RECLAIM_WB_SYNC		0x0004u /* Unused, all reclaim async */  #define RECLAIM_WB_ASYNC	0x0008u  #define show_reclaim_flags(flags)				\ @@ -23,15 +25,15 @@  	
	{RECLAIM_WB_ASYNC,	"RECLAIM_WB_ASYNC"}	\  		) : "RECLAIM_WB_NONE" -#define trace_reclaim_flags(page, sync) ( \ +#define trace_reclaim_flags(page) ( \  	(page_is_file_cache(page) ? RECLAIM_WB_FILE : RECLAIM_WB_ANON) | \ -	(sync == LUMPY_MODE_SYNC ? RECLAIM_WB_SYNC : RECLAIM_WB_ASYNC)   \ +	(RECLAIM_WB_ASYNC) \  	) -#define trace_shrink_flags(file, sync) ( \ -	(sync == LUMPY_MODE_SYNC ? RECLAIM_WB_MIXED : \ -			(file ? RECLAIM_WB_FILE : RECLAIM_WB_ANON)) |  \ -	(sync == LUMPY_MODE_SYNC ? RECLAIM_WB_SYNC : RECLAIM_WB_ASYNC) \ +#define trace_shrink_flags(file) \ +	( \ +		(file ? RECLAIM_WB_FILE : RECLAIM_WB_ANON) | \ +		(RECLAIM_WB_ASYNC) \  	)  TRACE_EVENT(mm_vmscan_kswapd_sleep, @@ -177,6 +179,90 @@ DEFINE_EVENT(mm_vmscan_direct_reclaim_end_template, mm_vmscan_memcg_softlimit_re  	TP_ARGS(nr_reclaimed)  ); +TRACE_EVENT(mm_shrink_slab_start, +	TP_PROTO(struct shrinker *shr, struct shrink_control *sc, +		long nr_objects_to_shrink, unsigned long pgs_scanned, +		unsigned long lru_pgs, unsigned long cache_items, +		unsigned long long delta, unsigned long total_scan), + +	TP_ARGS(shr, sc, nr_objects_to_shrink, pgs_scanned, lru_pgs, +		cache_items, delta, total_scan), + +	TP_STRUCT__entry( +		__field(struct shrinker *, shr) +		__field(void *, shrink) +		__field(int, nid) +		__field(long, nr_objects_to_shrink) +		__field(gfp_t, gfp_flags) +		__field(unsigned long, pgs_scanned) +		__field(unsigned long, lru_pgs) +		__field(unsigned long, cache_items) +		__field(unsigned long long, delta) +		__field(unsigned long, total_scan) +	), + +	TP_fast_assign( +		__entry->shr = shr; +		__entry->shrink = shr->scan_objects; +		__entry->nid = sc->nid; +		__entry->nr_objects_to_shrink = nr_objects_to_shrink; +		__entry->gfp_flags = sc->gfp_mask; +		__entry->pgs_scanned = pgs_scanned; +		__entry->lru_pgs = lru_pgs; +		__entry->cache_items = cache_items; +		__entry->delta = delta; +		__entry->total_scan = total_scan; +	), + +	TP_printk("%pF %p: nid: %d objects to shrink %ld gfp_flags %s 
pgs_scanned %ld lru_pgs %ld cache items %ld delta %lld total_scan %ld", +		__entry->shrink, +		__entry->shr, +		__entry->nid, +		__entry->nr_objects_to_shrink, +		show_gfp_flags(__entry->gfp_flags), +		__entry->pgs_scanned, +		__entry->lru_pgs, +		__entry->cache_items, +		__entry->delta, +		__entry->total_scan) +); + +TRACE_EVENT(mm_shrink_slab_end, +	TP_PROTO(struct shrinker *shr, int nid, int shrinker_retval, +		long unused_scan_cnt, long new_scan_cnt, long total_scan), + +	TP_ARGS(shr, nid, shrinker_retval, unused_scan_cnt, new_scan_cnt, +		total_scan), + +	TP_STRUCT__entry( +		__field(struct shrinker *, shr) +		__field(int, nid) +		__field(void *, shrink) +		__field(long, unused_scan) +		__field(long, new_scan) +		__field(int, retval) +		__field(long, total_scan) +	), + +	TP_fast_assign( +		__entry->shr = shr; +		__entry->nid = nid; +		__entry->shrink = shr->scan_objects; +		__entry->unused_scan = unused_scan_cnt; +		__entry->new_scan = new_scan_cnt; +		__entry->retval = shrinker_retval; +		__entry->total_scan = total_scan; +	), + +	TP_printk("%pF %p: nid: %d unused scan count %ld new scan count %ld total_scan %ld last shrinker return val %d", +		__entry->shrink, +		__entry->shr, +		__entry->nid, +		__entry->unused_scan, +		__entry->new_scan, +		__entry->total_scan, +		__entry->retval) +);  DECLARE_EVENT_CLASS(mm_vmscan_lru_isolate_template, @@ -184,22 +270,18 @@ DECLARE_EVENT_CLASS(mm_vmscan_lru_isolate_template,  		unsigned long nr_requested,  		unsigned long nr_scanned,  		unsigned long nr_taken, -		unsigned long nr_lumpy_taken, -		unsigned long nr_lumpy_dirty, -		unsigned long nr_lumpy_failed, -		int isolate_mode), +		isolate_mode_t isolate_mode, +		int file), -	TP_ARGS(order, nr_requested, nr_scanned, nr_taken, nr_lumpy_taken, nr_lumpy_dirty, nr_lumpy_failed, isolate_mode), +	TP_ARGS(order, nr_requested, nr_scanned, nr_taken, isolate_mode, file),  	TP_STRUCT__entry(  		__field(int, order)  		__field(unsigned long, nr_requested)  		__field(unsigned long, 
nr_scanned)  		__field(unsigned long, nr_taken) -		__field(unsigned long, nr_lumpy_taken) -		__field(unsigned long, nr_lumpy_dirty) -		__field(unsigned long, nr_lumpy_failed) -		__field(int, isolate_mode) +		__field(isolate_mode_t, isolate_mode) +		__field(int, file)  	),  	TP_fast_assign( @@ -207,21 +289,17 @@ DECLARE_EVENT_CLASS(mm_vmscan_lru_isolate_template,  		__entry->nr_requested = nr_requested;  		__entry->nr_scanned = nr_scanned;  		__entry->nr_taken = nr_taken; -		__entry->nr_lumpy_taken = nr_lumpy_taken; -		__entry->nr_lumpy_dirty = nr_lumpy_dirty; -		__entry->nr_lumpy_failed = nr_lumpy_failed;  		__entry->isolate_mode = isolate_mode; +		__entry->file = file;  	), -	TP_printk("isolate_mode=%d order=%d nr_requested=%lu nr_scanned=%lu nr_taken=%lu contig_taken=%lu contig_dirty=%lu contig_failed=%lu", +	TP_printk("isolate_mode=%d order=%d nr_requested=%lu nr_scanned=%lu nr_taken=%lu file=%d",  		__entry->isolate_mode,  		__entry->order,  		__entry->nr_requested,  		__entry->nr_scanned,  		__entry->nr_taken, -		__entry->nr_lumpy_taken, -		__entry->nr_lumpy_dirty, -		__entry->nr_lumpy_failed) +		__entry->file)  );  DEFINE_EVENT(mm_vmscan_lru_isolate_template, mm_vmscan_lru_isolate, @@ -230,12 +308,10 @@ DEFINE_EVENT(mm_vmscan_lru_isolate_template, mm_vmscan_lru_isolate,  		unsigned long nr_requested,  		unsigned long nr_scanned,  		unsigned long nr_taken, -		unsigned long nr_lumpy_taken, -		unsigned long nr_lumpy_dirty, -		unsigned long nr_lumpy_failed, -		int isolate_mode), +		isolate_mode_t isolate_mode, +		int file), -	TP_ARGS(order, nr_requested, nr_scanned, nr_taken, nr_lumpy_taken, nr_lumpy_dirty, nr_lumpy_failed, isolate_mode) +	TP_ARGS(order, nr_requested, nr_scanned, nr_taken, isolate_mode, file)  ); @@ -245,12 +321,10 @@ DEFINE_EVENT(mm_vmscan_lru_isolate_template, mm_vmscan_memcg_isolate,  		unsigned long nr_requested,  		unsigned long nr_scanned,  		unsigned long nr_taken, -		unsigned long nr_lumpy_taken, -		unsigned long nr_lumpy_dirty, -		
unsigned long nr_lumpy_failed, -		int isolate_mode), +		isolate_mode_t isolate_mode, +		int file), -	TP_ARGS(order, nr_requested, nr_scanned, nr_taken, nr_lumpy_taken, nr_lumpy_dirty, nr_lumpy_failed, isolate_mode) +	TP_ARGS(order, nr_requested, nr_scanned, nr_taken, isolate_mode, file)  ); @@ -310,7 +384,6 @@ TRACE_EVENT(mm_vmscan_lru_shrink_inactive,  		show_reclaim_flags(__entry->reclaim_flags))  ); -  #endif /* _TRACE_VMSCAN_H */  /* This part must be outside protection */ diff --git a/include/trace/events/workqueue.h b/include/trace/events/workqueue.h index 7d497291c85..bf0e18ba6cf 100644 --- a/include/trace/events/workqueue.h +++ b/include/trace/events/workqueue.h @@ -27,7 +27,7 @@ DECLARE_EVENT_CLASS(workqueue_work,  /**   * workqueue_queue_work - called when a work gets queued   * @req_cpu:	the requested cpu - * @cwq:	pointer to struct cpu_workqueue_struct + * @pwq:	pointer to struct pool_workqueue   * @work:	pointer to struct work_struct   *   * This event occurs when a work is queued immediately or once a @@ -36,10 +36,10 @@ DECLARE_EVENT_CLASS(workqueue_work,   */  TRACE_EVENT(workqueue_queue_work, -	TP_PROTO(unsigned int req_cpu, struct cpu_workqueue_struct *cwq, +	TP_PROTO(unsigned int req_cpu, struct pool_workqueue *pwq,  		 struct work_struct *work), -	TP_ARGS(req_cpu, cwq, work), +	TP_ARGS(req_cpu, pwq, work),  	TP_STRUCT__entry(  		__field( void *,	work	) @@ -52,9 +52,9 @@ TRACE_EVENT(workqueue_queue_work,  	TP_fast_assign(  		__entry->work		= work;  		__entry->function	= work->func; -		__entry->workqueue	= cwq->wq; +		__entry->workqueue	= pwq->wq;  		__entry->req_cpu	= req_cpu; -		__entry->cpu		= cwq->gcwq->cpu; +		__entry->cpu		= pwq->pool->cpu;  	),  	TP_printk("work struct=%p function=%pf workqueue=%p req_cpu=%u cpu=%u", @@ -103,7 +103,7 @@ TRACE_EVENT(workqueue_execute_start,  );  /** - * workqueue_execute_end - called immediately before the workqueue callback + * workqueue_execute_end - called immediately after the workqueue callback   * 
@work:	pointer to struct work_struct   *   * Allows to track workqueue execution. diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h index 89a2b2db437..cee02d65ab3 100644 --- a/include/trace/events/writeback.h +++ b/include/trace/events/writeback.h @@ -4,12 +4,144 @@  #if !defined(_TRACE_WRITEBACK_H) || defined(TRACE_HEADER_MULTI_READ)  #define _TRACE_WRITEBACK_H +#include <linux/tracepoint.h>  #include <linux/backing-dev.h> -#include <linux/device.h>  #include <linux/writeback.h> +#define show_inode_state(state)					\ +	__print_flags(state, "|",				\ +		{I_DIRTY_SYNC,		"I_DIRTY_SYNC"},	\ +		{I_DIRTY_DATASYNC,	"I_DIRTY_DATASYNC"},	\ +		{I_DIRTY_PAGES,		"I_DIRTY_PAGES"},	\ +		{I_NEW,			"I_NEW"},		\ +		{I_WILL_FREE,		"I_WILL_FREE"},		\ +		{I_FREEING,		"I_FREEING"},		\ +		{I_CLEAR,		"I_CLEAR"},		\ +		{I_SYNC,		"I_SYNC"},		\ +		{I_REFERENCED,		"I_REFERENCED"}		\ +	) + +#define WB_WORK_REASON							\ +		{WB_REASON_BACKGROUND,		"background"},		\ +		{WB_REASON_TRY_TO_FREE_PAGES,	"try_to_free_pages"},	\ +		{WB_REASON_SYNC,		"sync"},		\ +		{WB_REASON_PERIODIC,		"periodic"},		\ +		{WB_REASON_LAPTOP_TIMER,	"laptop_timer"},	\ +		{WB_REASON_FREE_MORE_MEM,	"free_more_memory"},	\ +		{WB_REASON_FS_FREE_SPACE,	"fs_free_space"},	\ +		{WB_REASON_FORKER_THREAD,	"forker_thread"} +  struct wb_writeback_work; +TRACE_EVENT(writeback_dirty_page, + +	TP_PROTO(struct page *page, struct address_space *mapping), + +	TP_ARGS(page, mapping), + +	TP_STRUCT__entry ( +		__array(char, name, 32) +		__field(unsigned long, ino) +		__field(pgoff_t, index) +	), + +	TP_fast_assign( +		strncpy(__entry->name, +			mapping ? dev_name(mapping->backing_dev_info->dev) : "(unknown)", 32); +		__entry->ino = mapping ? 
mapping->host->i_ino : 0; +		__entry->index = page->index; +	), + +	TP_printk("bdi %s: ino=%lu index=%lu", +		__entry->name, +		__entry->ino, +		__entry->index +	) +); + +DECLARE_EVENT_CLASS(writeback_dirty_inode_template, + +	TP_PROTO(struct inode *inode, int flags), + +	TP_ARGS(inode, flags), + +	TP_STRUCT__entry ( +		__array(char, name, 32) +		__field(unsigned long, ino) +		__field(unsigned long, flags) +	), + +	TP_fast_assign( +		struct backing_dev_info *bdi = inode->i_mapping->backing_dev_info; + +		/* may be called for files on pseudo FSes w/ unregistered bdi */ +		strncpy(__entry->name, +			bdi->dev ? dev_name(bdi->dev) : "(unknown)", 32); +		__entry->ino		= inode->i_ino; +		__entry->flags		= flags; +	), + +	TP_printk("bdi %s: ino=%lu flags=%s", +		__entry->name, +		__entry->ino, +		show_inode_state(__entry->flags) +	) +); + +DEFINE_EVENT(writeback_dirty_inode_template, writeback_dirty_inode_start, + +	TP_PROTO(struct inode *inode, int flags), + +	TP_ARGS(inode, flags) +); + +DEFINE_EVENT(writeback_dirty_inode_template, writeback_dirty_inode, + +	TP_PROTO(struct inode *inode, int flags), + +	TP_ARGS(inode, flags) +); + +DECLARE_EVENT_CLASS(writeback_write_inode_template, + +	TP_PROTO(struct inode *inode, struct writeback_control *wbc), + +	TP_ARGS(inode, wbc), + +	TP_STRUCT__entry ( +		__array(char, name, 32) +		__field(unsigned long, ino) +		__field(int, sync_mode) +	), + +	TP_fast_assign( +		strncpy(__entry->name, +			dev_name(inode->i_mapping->backing_dev_info->dev), 32); +		__entry->ino		= inode->i_ino; +		__entry->sync_mode	= wbc->sync_mode; +	), + +	TP_printk("bdi %s: ino=%lu sync_mode=%d", +		__entry->name, +		__entry->ino, +		__entry->sync_mode +	) +); + +DEFINE_EVENT(writeback_write_inode_template, writeback_write_inode_start, + +	TP_PROTO(struct inode *inode, struct writeback_control *wbc), + +	TP_ARGS(inode, wbc) +); + +DEFINE_EVENT(writeback_write_inode_template, writeback_write_inode, + +	TP_PROTO(struct inode *inode, struct writeback_control 
*wbc), + +	TP_ARGS(inode, wbc) +); +  DECLARE_EVENT_CLASS(writeback_work_class,  	TP_PROTO(struct backing_dev_info *bdi, struct wb_writeback_work *work),  	TP_ARGS(bdi, work), @@ -21,34 +153,42 @@ DECLARE_EVENT_CLASS(writeback_work_class,  		__field(int, for_kupdate)  		__field(int, range_cyclic)  		__field(int, for_background) +		__field(int, reason)  	),  	TP_fast_assign( -		strncpy(__entry->name, dev_name(bdi->dev), 32); +		struct device *dev = bdi->dev; +		if (!dev) +			dev = default_backing_dev_info.dev; +		strncpy(__entry->name, dev_name(dev), 32);  		__entry->nr_pages = work->nr_pages;  		__entry->sb_dev = work->sb ? work->sb->s_dev : 0;  		__entry->sync_mode = work->sync_mode;  		__entry->for_kupdate = work->for_kupdate;  		__entry->range_cyclic = work->range_cyclic;  		__entry->for_background	= work->for_background; +		__entry->reason = work->reason;  	),  	TP_printk("bdi %s: sb_dev %d:%d nr_pages=%ld sync_mode=%d " -		  "kupdate=%d range_cyclic=%d background=%d", +		  "kupdate=%d range_cyclic=%d background=%d reason=%s",  		  __entry->name,  		  MAJOR(__entry->sb_dev), MINOR(__entry->sb_dev),  		  __entry->nr_pages,  		  __entry->sync_mode,  		  __entry->for_kupdate,  		  __entry->range_cyclic, -		  __entry->for_background +		  __entry->for_background, +		  __print_symbolic(__entry->reason, WB_WORK_REASON)  	)  );  #define DEFINE_WRITEBACK_WORK_EVENT(name) \  DEFINE_EVENT(writeback_work_class, name, \  	TP_PROTO(struct backing_dev_info *bdi, struct wb_writeback_work *work), \  	TP_ARGS(bdi, work)) -DEFINE_WRITEBACK_WORK_EVENT(writeback_nothread);  DEFINE_WRITEBACK_WORK_EVENT(writeback_queue);  DEFINE_WRITEBACK_WORK_EVENT(writeback_exec); +DEFINE_WRITEBACK_WORK_EVENT(writeback_start); +DEFINE_WRITEBACK_WORK_EVENT(writeback_written); +DEFINE_WRITEBACK_WORK_EVENT(writeback_wait);  TRACE_EVENT(writeback_pages_written,  	TP_PROTO(long pages_written), @@ -81,12 +221,9 @@ DEFINE_EVENT(writeback_class, name, \  	TP_ARGS(bdi))  
DEFINE_WRITEBACK_EVENT(writeback_nowork); -DEFINE_WRITEBACK_EVENT(writeback_wake_thread); -DEFINE_WRITEBACK_EVENT(writeback_wake_forker_thread); +DEFINE_WRITEBACK_EVENT(writeback_wake_background);  DEFINE_WRITEBACK_EVENT(writeback_bdi_register);  DEFINE_WRITEBACK_EVENT(writeback_bdi_unregister); -DEFINE_WRITEBACK_EVENT(writeback_thread_start); -DEFINE_WRITEBACK_EVENT(writeback_thread_stop);  DECLARE_EVENT_CLASS(wbc_class,  	TP_PROTO(struct writeback_control *wbc, struct backing_dev_info *bdi), @@ -100,8 +237,6 @@ DECLARE_EVENT_CLASS(wbc_class,  		__field(int, for_background)  		__field(int, for_reclaim)  		__field(int, range_cyclic) -		__field(int, more_io) -		__field(unsigned long, older_than_this)  		__field(long, range_start)  		__field(long, range_end)  	), @@ -115,15 +250,12 @@ DECLARE_EVENT_CLASS(wbc_class,  		__entry->for_background	= wbc->for_background;  		__entry->for_reclaim	= wbc->for_reclaim;  		__entry->range_cyclic	= wbc->range_cyclic; -		__entry->more_io	= wbc->more_io; -		__entry->older_than_this = wbc->older_than_this ? 
-						*wbc->older_than_this : 0;  		__entry->range_start	= (long)wbc->range_start;  		__entry->range_end	= (long)wbc->range_end;  	),  	TP_printk("bdi %s: towrt=%ld skip=%ld mode=%d kupd=%d " -		"bgrd=%d reclm=%d cyclic=%d more=%d older=0x%lx " +		"bgrd=%d reclm=%d cyclic=%d "  		"start=0x%lx end=0x%lx",  		__entry->name,  		__entry->nr_to_write, @@ -133,8 +265,6 @@ DECLARE_EVENT_CLASS(wbc_class,  		__entry->for_background,  		__entry->for_reclaim,  		__entry->range_cyclic, -		__entry->more_io, -		__entry->older_than_this,  		__entry->range_start,  		__entry->range_end)  ) @@ -143,14 +273,239 @@ DECLARE_EVENT_CLASS(wbc_class,  DEFINE_EVENT(wbc_class, name, \  	TP_PROTO(struct writeback_control *wbc, struct backing_dev_info *bdi), \  	TP_ARGS(wbc, bdi)) -DEFINE_WBC_EVENT(wbc_writeback_start); -DEFINE_WBC_EVENT(wbc_writeback_written); -DEFINE_WBC_EVENT(wbc_writeback_wait); -DEFINE_WBC_EVENT(wbc_balance_dirty_start); -DEFINE_WBC_EVENT(wbc_balance_dirty_written); -DEFINE_WBC_EVENT(wbc_balance_dirty_wait);  DEFINE_WBC_EVENT(wbc_writepage); +TRACE_EVENT(writeback_queue_io, +	TP_PROTO(struct bdi_writeback *wb, +		 struct wb_writeback_work *work, +		 int moved), +	TP_ARGS(wb, work, moved), +	TP_STRUCT__entry( +		__array(char,		name, 32) +		__field(unsigned long,	older) +		__field(long,		age) +		__field(int,		moved) +		__field(int,		reason) +	), +	TP_fast_assign( +		unsigned long *older_than_this = work->older_than_this; +		strncpy(__entry->name, dev_name(wb->bdi->dev), 32); +		__entry->older	= older_than_this ?  *older_than_this : 0; +		__entry->age	= older_than_this ? 
+				  (jiffies - *older_than_this) * 1000 / HZ : -1; +		__entry->moved	= moved; +		__entry->reason	= work->reason; +	), +	TP_printk("bdi %s: older=%lu age=%ld enqueue=%d reason=%s", +		__entry->name, +		__entry->older,	/* older_than_this in jiffies */ +		__entry->age,	/* older_than_this in relative milliseconds */ +		__entry->moved, +		__print_symbolic(__entry->reason, WB_WORK_REASON) +	) +); + +TRACE_EVENT(global_dirty_state, + +	TP_PROTO(unsigned long background_thresh, +		 unsigned long dirty_thresh +	), + +	TP_ARGS(background_thresh, +		dirty_thresh +	), + +	TP_STRUCT__entry( +		__field(unsigned long,	nr_dirty) +		__field(unsigned long,	nr_writeback) +		__field(unsigned long,	nr_unstable) +		__field(unsigned long,	background_thresh) +		__field(unsigned long,	dirty_thresh) +		__field(unsigned long,	dirty_limit) +		__field(unsigned long,	nr_dirtied) +		__field(unsigned long,	nr_written) +	), + +	TP_fast_assign( +		__entry->nr_dirty	= global_page_state(NR_FILE_DIRTY); +		__entry->nr_writeback	= global_page_state(NR_WRITEBACK); +		__entry->nr_unstable	= global_page_state(NR_UNSTABLE_NFS); +		__entry->nr_dirtied	= global_page_state(NR_DIRTIED); +		__entry->nr_written	= global_page_state(NR_WRITTEN); +		__entry->background_thresh = background_thresh; +		__entry->dirty_thresh	= dirty_thresh; +		__entry->dirty_limit = global_dirty_limit; +	), + +	TP_printk("dirty=%lu writeback=%lu unstable=%lu " +		  "bg_thresh=%lu thresh=%lu limit=%lu " +		  "dirtied=%lu written=%lu", +		  __entry->nr_dirty, +		  __entry->nr_writeback, +		  __entry->nr_unstable, +		  __entry->background_thresh, +		  __entry->dirty_thresh, +		  __entry->dirty_limit, +		  __entry->nr_dirtied, +		  __entry->nr_written +	) +); + +#define KBps(x)			((x) << (PAGE_SHIFT - 10)) + +TRACE_EVENT(bdi_dirty_ratelimit, + +	TP_PROTO(struct backing_dev_info *bdi, +		 unsigned long dirty_rate, +		 unsigned long task_ratelimit), + +	TP_ARGS(bdi, dirty_rate, task_ratelimit), + +	TP_STRUCT__entry( +		__array(char,		bdi, 
32) +		__field(unsigned long,	write_bw) +		__field(unsigned long,	avg_write_bw) +		__field(unsigned long,	dirty_rate) +		__field(unsigned long,	dirty_ratelimit) +		__field(unsigned long,	task_ratelimit) +		__field(unsigned long,	balanced_dirty_ratelimit) +	), + +	TP_fast_assign( +		strlcpy(__entry->bdi, dev_name(bdi->dev), 32); +		__entry->write_bw	= KBps(bdi->write_bandwidth); +		__entry->avg_write_bw	= KBps(bdi->avg_write_bandwidth); +		__entry->dirty_rate	= KBps(dirty_rate); +		__entry->dirty_ratelimit = KBps(bdi->dirty_ratelimit); +		__entry->task_ratelimit	= KBps(task_ratelimit); +		__entry->balanced_dirty_ratelimit = +					  KBps(bdi->balanced_dirty_ratelimit); +	), + +	TP_printk("bdi %s: " +		  "write_bw=%lu awrite_bw=%lu dirty_rate=%lu " +		  "dirty_ratelimit=%lu task_ratelimit=%lu " +		  "balanced_dirty_ratelimit=%lu", +		  __entry->bdi, +		  __entry->write_bw,		/* write bandwidth */ +		  __entry->avg_write_bw,	/* avg write bandwidth */ +		  __entry->dirty_rate,		/* bdi dirty rate */ +		  __entry->dirty_ratelimit,	/* base ratelimit */ +		  __entry->task_ratelimit, /* ratelimit with position control */ +		  __entry->balanced_dirty_ratelimit /* the balanced ratelimit */ +	) +); + +TRACE_EVENT(balance_dirty_pages, + +	TP_PROTO(struct backing_dev_info *bdi, +		 unsigned long thresh, +		 unsigned long bg_thresh, +		 unsigned long dirty, +		 unsigned long bdi_thresh, +		 unsigned long bdi_dirty, +		 unsigned long dirty_ratelimit, +		 unsigned long task_ratelimit, +		 unsigned long dirtied, +		 unsigned long period, +		 long pause, +		 unsigned long start_time), + +	TP_ARGS(bdi, thresh, bg_thresh, dirty, bdi_thresh, bdi_dirty, +		dirty_ratelimit, task_ratelimit, +		dirtied, period, pause, start_time), + +	TP_STRUCT__entry( +		__array(	 char,	bdi, 32) +		__field(unsigned long,	limit) +		__field(unsigned long,	setpoint) +		__field(unsigned long,	dirty) +		__field(unsigned long,	bdi_setpoint) +		__field(unsigned long,	bdi_dirty) +		__field(unsigned long,	
dirty_ratelimit) +		__field(unsigned long,	task_ratelimit) +		__field(unsigned int,	dirtied) +		__field(unsigned int,	dirtied_pause) +		__field(unsigned long,	paused) +		__field(	 long,	pause) +		__field(unsigned long,	period) +		__field(	 long,	think) +	), + +	TP_fast_assign( +		unsigned long freerun = (thresh + bg_thresh) / 2; +		strlcpy(__entry->bdi, dev_name(bdi->dev), 32); + +		__entry->limit		= global_dirty_limit; +		__entry->setpoint	= (global_dirty_limit + freerun) / 2; +		__entry->dirty		= dirty; +		__entry->bdi_setpoint	= __entry->setpoint * +						bdi_thresh / (thresh + 1); +		__entry->bdi_dirty	= bdi_dirty; +		__entry->dirty_ratelimit = KBps(dirty_ratelimit); +		__entry->task_ratelimit	= KBps(task_ratelimit); +		__entry->dirtied	= dirtied; +		__entry->dirtied_pause	= current->nr_dirtied_pause; +		__entry->think		= current->dirty_paused_when == 0 ? 0 : +			 (long)(jiffies - current->dirty_paused_when) * 1000/HZ; +		__entry->period		= period * 1000 / HZ; +		__entry->pause		= pause * 1000 / HZ; +		__entry->paused		= (jiffies - start_time) * 1000 / HZ; +	), + + +	TP_printk("bdi %s: " +		  "limit=%lu setpoint=%lu dirty=%lu " +		  "bdi_setpoint=%lu bdi_dirty=%lu " +		  "dirty_ratelimit=%lu task_ratelimit=%lu " +		  "dirtied=%u dirtied_pause=%u " +		  "paused=%lu pause=%ld period=%lu think=%ld", +		  __entry->bdi, +		  __entry->limit, +		  __entry->setpoint, +		  __entry->dirty, +		  __entry->bdi_setpoint, +		  __entry->bdi_dirty, +		  __entry->dirty_ratelimit, +		  __entry->task_ratelimit, +		  __entry->dirtied, +		  __entry->dirtied_pause, +		  __entry->paused,	/* ms */ +		  __entry->pause,	/* ms */ +		  __entry->period,	/* ms */ +		  __entry->think	/* ms */ +	  ) +); + +TRACE_EVENT(writeback_sb_inodes_requeue, + +	TP_PROTO(struct inode *inode), +	TP_ARGS(inode), + +	TP_STRUCT__entry( +		__array(char, name, 32) +		__field(unsigned long, ino) +		__field(unsigned long, state) +		__field(unsigned long, dirtied_when) +	), + +	TP_fast_assign( +		
strncpy(__entry->name, +		        dev_name(inode_to_bdi(inode)->dev), 32); +		__entry->ino		= inode->i_ino; +		__entry->state		= inode->i_state; +		__entry->dirtied_when	= inode->dirtied_when; +	), + +	TP_printk("bdi %s: ino=%lu state=%s dirtied_when=%lu age=%lu", +		  __entry->name, +		  __entry->ino, +		  show_inode_state(__entry->state), +		  __entry->dirtied_when, +		  (jiffies - __entry->dirtied_when) / HZ +	) +); +  DECLARE_EVENT_CLASS(writeback_congest_waited_template,  	TP_PROTO(unsigned int usec_timeout, unsigned int usec_delayed), @@ -186,6 +541,63 @@ DEFINE_EVENT(writeback_congest_waited_template, writeback_wait_iff_congested,  	TP_ARGS(usec_timeout, usec_delayed)  ); +DECLARE_EVENT_CLASS(writeback_single_inode_template, + +	TP_PROTO(struct inode *inode, +		 struct writeback_control *wbc, +		 unsigned long nr_to_write +	), + +	TP_ARGS(inode, wbc, nr_to_write), + +	TP_STRUCT__entry( +		__array(char, name, 32) +		__field(unsigned long, ino) +		__field(unsigned long, state) +		__field(unsigned long, dirtied_when) +		__field(unsigned long, writeback_index) +		__field(long, nr_to_write) +		__field(unsigned long, wrote) +	), + +	TP_fast_assign( +		strncpy(__entry->name, +			dev_name(inode_to_bdi(inode)->dev), 32); +		__entry->ino		= inode->i_ino; +		__entry->state		= inode->i_state; +		__entry->dirtied_when	= inode->dirtied_when; +		__entry->writeback_index = inode->i_mapping->writeback_index; +		__entry->nr_to_write	= nr_to_write; +		__entry->wrote		= nr_to_write - wbc->nr_to_write; +	), + +	TP_printk("bdi %s: ino=%lu state=%s dirtied_when=%lu age=%lu " +		  "index=%lu to_write=%ld wrote=%lu", +		  __entry->name, +		  __entry->ino, +		  show_inode_state(__entry->state), +		  __entry->dirtied_when, +		  (jiffies - __entry->dirtied_when) / HZ, +		  __entry->writeback_index, +		  __entry->nr_to_write, +		  __entry->wrote +	) +); + +DEFINE_EVENT(writeback_single_inode_template, writeback_single_inode_start, +	TP_PROTO(struct inode *inode, +		 struct 
writeback_control *wbc, +		 unsigned long nr_to_write), +	TP_ARGS(inode, wbc, nr_to_write) +); + +DEFINE_EVENT(writeback_single_inode_template, writeback_single_inode, +	TP_PROTO(struct inode *inode, +		 struct writeback_control *wbc, +		 unsigned long nr_to_write), +	TP_ARGS(inode, wbc, nr_to_write) +); +  #endif /* _TRACE_WRITEBACK_H */  /* This part must be outside protection */ diff --git a/include/trace/events/xen.h b/include/trace/events/xen.h new file mode 100644 index 00000000000..d06b6da5c1e --- /dev/null +++ b/include/trace/events/xen.h @@ -0,0 +1,516 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM xen + +#if !defined(_TRACE_XEN_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_XEN_H + +#include <linux/tracepoint.h> +#include <asm/paravirt_types.h> +#include <asm/xen/trace_types.h> + +struct multicall_entry; + +/* Multicalls */ +DECLARE_EVENT_CLASS(xen_mc__batch, +	    TP_PROTO(enum paravirt_lazy_mode mode), +	    TP_ARGS(mode), +	    TP_STRUCT__entry( +		    __field(enum paravirt_lazy_mode, mode) +		    ), +	    TP_fast_assign(__entry->mode = mode), +	    TP_printk("start batch LAZY_%s", +		      (__entry->mode == PARAVIRT_LAZY_MMU) ? "MMU" : +		      (__entry->mode == PARAVIRT_LAZY_CPU) ? 
"CPU" : "NONE") +	); +#define DEFINE_XEN_MC_BATCH(name)			\ +	DEFINE_EVENT(xen_mc__batch, name,		\ +		TP_PROTO(enum paravirt_lazy_mode mode),	\ +		     TP_ARGS(mode)) + +DEFINE_XEN_MC_BATCH(xen_mc_batch); +DEFINE_XEN_MC_BATCH(xen_mc_issue); + +TRACE_EVENT(xen_mc_entry, +	    TP_PROTO(struct multicall_entry *mc, unsigned nargs), +	    TP_ARGS(mc, nargs), +	    TP_STRUCT__entry( +		    __field(unsigned int, op) +		    __field(unsigned int, nargs) +		    __array(unsigned long, args, 6) +		    ), +	    TP_fast_assign(__entry->op = mc->op; +			   __entry->nargs = nargs; +			   memcpy(__entry->args, mc->args, sizeof(unsigned long) * nargs); +			   memset(__entry->args + nargs, 0, sizeof(unsigned long) * (6 - nargs)); +		    ), +	    TP_printk("op %u%s args [%lx, %lx, %lx, %lx, %lx, %lx]", +		      __entry->op, xen_hypercall_name(__entry->op), +		      __entry->args[0], __entry->args[1], __entry->args[2], +		      __entry->args[3], __entry->args[4], __entry->args[5]) +	); + +TRACE_EVENT(xen_mc_entry_alloc, +	    TP_PROTO(size_t args), +	    TP_ARGS(args), +	    TP_STRUCT__entry( +		    __field(size_t, args) +		    ), +	    TP_fast_assign(__entry->args = args), +	    TP_printk("alloc entry %zu arg bytes", __entry->args) +	); + +TRACE_EVENT(xen_mc_callback, +	    TP_PROTO(xen_mc_callback_fn_t fn, void *data), +	    TP_ARGS(fn, data), +	    TP_STRUCT__entry( +		    __field(xen_mc_callback_fn_t, fn) +		    __field(void *, data) +		    ), +	    TP_fast_assign( +		    __entry->fn = fn; +		    __entry->data = data; +		    ), +	    TP_printk("callback %pf, data %p", +		      __entry->fn, __entry->data) +	); + +TRACE_EVENT(xen_mc_flush_reason, +	    TP_PROTO(enum xen_mc_flush_reason reason), +	    TP_ARGS(reason), +	    TP_STRUCT__entry( +		    __field(enum xen_mc_flush_reason, reason) +		    ), +	    TP_fast_assign(__entry->reason = reason), +	    TP_printk("flush reason %s", +		      (__entry->reason == XEN_MC_FL_NONE) ? "NONE" : +		      (__entry->reason == XEN_MC_FL_BATCH) ? 
"BATCH" : +		      (__entry->reason == XEN_MC_FL_ARGS) ? "ARGS" : +		      (__entry->reason == XEN_MC_FL_CALLBACK) ? "CALLBACK" : "??") +	); + +TRACE_EVENT(xen_mc_flush, +	    TP_PROTO(unsigned mcidx, unsigned argidx, unsigned cbidx), +	    TP_ARGS(mcidx, argidx, cbidx), +	    TP_STRUCT__entry( +		    __field(unsigned, mcidx) +		    __field(unsigned, argidx) +		    __field(unsigned, cbidx) +		    ), +	    TP_fast_assign(__entry->mcidx = mcidx; +			   __entry->argidx = argidx; +			   __entry->cbidx = cbidx), +	    TP_printk("flushing %u hypercalls, %u arg bytes, %u callbacks", +		      __entry->mcidx, __entry->argidx, __entry->cbidx) +	); + +TRACE_EVENT(xen_mc_extend_args, +	    TP_PROTO(unsigned long op, size_t args, enum xen_mc_extend_args res), +	    TP_ARGS(op, args, res), +	    TP_STRUCT__entry( +		    __field(unsigned int, op) +		    __field(size_t, args) +		    __field(enum xen_mc_extend_args, res) +		    ), +	    TP_fast_assign(__entry->op = op; +			   __entry->args = args; +			   __entry->res = res), +	    TP_printk("extending op %u%s by %zu bytes res %s", +		      __entry->op, xen_hypercall_name(__entry->op), +		      __entry->args, +		      __entry->res == XEN_MC_XE_OK ? "OK" : +		      __entry->res == XEN_MC_XE_BAD_OP ? "BAD_OP" : +		      __entry->res == XEN_MC_XE_NO_SPACE ? 
"NO_SPACE" : "???") +	); + +/* mmu */ +DECLARE_EVENT_CLASS(xen_mmu__set_pte, +	    TP_PROTO(pte_t *ptep, pte_t pteval), +	    TP_ARGS(ptep, pteval), +	    TP_STRUCT__entry( +		    __field(pte_t *, ptep) +		    __field(pteval_t, pteval) +		    ), +	    TP_fast_assign(__entry->ptep = ptep; +			   __entry->pteval = pteval.pte), +	    TP_printk("ptep %p pteval %0*llx (raw %0*llx)", +		      __entry->ptep, +		      (int)sizeof(pteval_t) * 2, (unsigned long long)pte_val(native_make_pte(__entry->pteval)), +		      (int)sizeof(pteval_t) * 2, (unsigned long long)__entry->pteval) +	); + +#define DEFINE_XEN_MMU_SET_PTE(name)				\ +	DEFINE_EVENT(xen_mmu__set_pte, name,			\ +		     TP_PROTO(pte_t *ptep, pte_t pteval),	\ +		     TP_ARGS(ptep, pteval)) + +DEFINE_XEN_MMU_SET_PTE(xen_mmu_set_pte); +DEFINE_XEN_MMU_SET_PTE(xen_mmu_set_pte_atomic); + +TRACE_EVENT(xen_mmu_set_domain_pte, +	    TP_PROTO(pte_t *ptep, pte_t pteval, unsigned domid), +	    TP_ARGS(ptep, pteval, domid), +	    TP_STRUCT__entry( +		    __field(pte_t *, ptep) +		    __field(pteval_t, pteval) +		    __field(unsigned, domid) +		    ), +	    TP_fast_assign(__entry->ptep = ptep; +			   __entry->pteval = pteval.pte; +			   __entry->domid = domid), +	    TP_printk("ptep %p pteval %0*llx (raw %0*llx) domid %u", +		      __entry->ptep, +		      (int)sizeof(pteval_t) * 2, (unsigned long long)pte_val(native_make_pte(__entry->pteval)), +		      (int)sizeof(pteval_t) * 2, (unsigned long long)__entry->pteval, +		      __entry->domid) +	); + +TRACE_EVENT(xen_mmu_set_pte_at, +	    TP_PROTO(struct mm_struct *mm, unsigned long addr, +		     pte_t *ptep, pte_t pteval), +	    TP_ARGS(mm, addr, ptep, pteval), +	    TP_STRUCT__entry( +		    __field(struct mm_struct *, mm) +		    __field(unsigned long, addr) +		    __field(pte_t *, ptep) +		    __field(pteval_t, pteval) +		    ), +	    TP_fast_assign(__entry->mm = mm; +			   __entry->addr = addr; +			   __entry->ptep = ptep; +			   __entry->pteval = pteval.pte), +	    TP_printk("mm 
%p addr %lx ptep %p pteval %0*llx (raw %0*llx)", +		      __entry->mm, __entry->addr, __entry->ptep, +		      (int)sizeof(pteval_t) * 2, (unsigned long long)pte_val(native_make_pte(__entry->pteval)), +		      (int)sizeof(pteval_t) * 2, (unsigned long long)__entry->pteval) +	); + +TRACE_EVENT(xen_mmu_pte_clear, +	    TP_PROTO(struct mm_struct *mm, unsigned long addr, pte_t *ptep), +	    TP_ARGS(mm, addr, ptep), +	    TP_STRUCT__entry( +		    __field(struct mm_struct *, mm) +		    __field(unsigned long, addr) +		    __field(pte_t *, ptep) +		    ), +	    TP_fast_assign(__entry->mm = mm; +			   __entry->addr = addr; +			   __entry->ptep = ptep), +	    TP_printk("mm %p addr %lx ptep %p", +		      __entry->mm, __entry->addr, __entry->ptep) +	); + +TRACE_EVENT(xen_mmu_set_pmd, +	    TP_PROTO(pmd_t *pmdp, pmd_t pmdval), +	    TP_ARGS(pmdp, pmdval), +	    TP_STRUCT__entry( +		    __field(pmd_t *, pmdp) +		    __field(pmdval_t, pmdval) +		    ), +	    TP_fast_assign(__entry->pmdp = pmdp; +			   __entry->pmdval = pmdval.pmd), +	    TP_printk("pmdp %p pmdval %0*llx (raw %0*llx)", +		      __entry->pmdp, +		      (int)sizeof(pmdval_t) * 2, (unsigned long long)pmd_val(native_make_pmd(__entry->pmdval)), +		      (int)sizeof(pmdval_t) * 2, (unsigned long long)__entry->pmdval) +	); + +TRACE_EVENT(xen_mmu_pmd_clear, +	    TP_PROTO(pmd_t *pmdp), +	    TP_ARGS(pmdp), +	    TP_STRUCT__entry( +		    __field(pmd_t *, pmdp) +		    ), +	    TP_fast_assign(__entry->pmdp = pmdp), +	    TP_printk("pmdp %p", __entry->pmdp) +	); + +#if PAGETABLE_LEVELS >= 4 + +TRACE_EVENT(xen_mmu_set_pud, +	    TP_PROTO(pud_t *pudp, pud_t pudval), +	    TP_ARGS(pudp, pudval), +	    TP_STRUCT__entry( +		    __field(pud_t *, pudp) +		    __field(pudval_t, pudval) +		    ), +	    TP_fast_assign(__entry->pudp = pudp; +			   __entry->pudval = native_pud_val(pudval)), +	    TP_printk("pudp %p pudval %0*llx (raw %0*llx)", +		      __entry->pudp, +		      (int)sizeof(pudval_t) * 2, (unsigned long 
long)pud_val(native_make_pud(__entry->pudval)), +		      (int)sizeof(pudval_t) * 2, (unsigned long long)__entry->pudval) +	); + +TRACE_EVENT(xen_mmu_set_pgd, +	    TP_PROTO(pgd_t *pgdp, pgd_t *user_pgdp, pgd_t pgdval), +	    TP_ARGS(pgdp, user_pgdp, pgdval), +	    TP_STRUCT__entry( +		    __field(pgd_t *, pgdp) +		    __field(pgd_t *, user_pgdp) +		    __field(pgdval_t, pgdval) +		    ), +	    TP_fast_assign(__entry->pgdp = pgdp; +			   __entry->user_pgdp = user_pgdp; +			   __entry->pgdval = pgdval.pgd), +	    TP_printk("pgdp %p user_pgdp %p pgdval %0*llx (raw %0*llx)", +		      __entry->pgdp, __entry->user_pgdp, +		      (int)sizeof(pgdval_t) * 2, (unsigned long long)pgd_val(native_make_pgd(__entry->pgdval)), +		      (int)sizeof(pgdval_t) * 2, (unsigned long long)__entry->pgdval) +	); + +TRACE_EVENT(xen_mmu_pud_clear, +	    TP_PROTO(pud_t *pudp), +	    TP_ARGS(pudp), +	    TP_STRUCT__entry( +		    __field(pud_t *, pudp) +		    ), +	    TP_fast_assign(__entry->pudp = pudp), +	    TP_printk("pudp %p", __entry->pudp) +	); +#else + +TRACE_EVENT(xen_mmu_set_pud, +	    TP_PROTO(pud_t *pudp, pud_t pudval), +	    TP_ARGS(pudp, pudval), +	    TP_STRUCT__entry( +		    __field(pud_t *, pudp) +		    __field(pudval_t, pudval) +		    ), +	    TP_fast_assign(__entry->pudp = pudp; +			   __entry->pudval = native_pud_val(pudval)), +	    TP_printk("pudp %p pudval %0*llx (raw %0*llx)", +		      __entry->pudp, +		      (int)sizeof(pudval_t) * 2, (unsigned long long)pgd_val(native_make_pgd(__entry->pudval)), +		      (int)sizeof(pudval_t) * 2, (unsigned long long)__entry->pudval) +	); + +#endif + +TRACE_EVENT(xen_mmu_pgd_clear, +	    TP_PROTO(pgd_t *pgdp), +	    TP_ARGS(pgdp), +	    TP_STRUCT__entry( +		    __field(pgd_t *, pgdp) +		    ), +	    TP_fast_assign(__entry->pgdp = pgdp), +	    TP_printk("pgdp %p", __entry->pgdp) +	); + +DECLARE_EVENT_CLASS(xen_mmu_ptep_modify_prot, +	    TP_PROTO(struct mm_struct *mm, unsigned long addr, +		     pte_t *ptep, pte_t pteval), +	    
TP_ARGS(mm, addr, ptep, pteval), +	    TP_STRUCT__entry( +		    __field(struct mm_struct *, mm) +		    __field(unsigned long, addr) +		    __field(pte_t *, ptep) +		    __field(pteval_t, pteval) +		    ), +	    TP_fast_assign(__entry->mm = mm; +			   __entry->addr = addr; +			   __entry->ptep = ptep; +			   __entry->pteval = pteval.pte), +	    TP_printk("mm %p addr %lx ptep %p pteval %0*llx (raw %0*llx)", +		      __entry->mm, __entry->addr, __entry->ptep, +		      (int)sizeof(pteval_t) * 2, (unsigned long long)pte_val(native_make_pte(__entry->pteval)), +		      (int)sizeof(pteval_t) * 2, (unsigned long long)__entry->pteval) +	); +#define DEFINE_XEN_MMU_PTEP_MODIFY_PROT(name)				\ +	DEFINE_EVENT(xen_mmu_ptep_modify_prot, name,			\ +		     TP_PROTO(struct mm_struct *mm, unsigned long addr,	\ +			      pte_t *ptep, pte_t pteval),		\ +		     TP_ARGS(mm, addr, ptep, pteval)) + +DEFINE_XEN_MMU_PTEP_MODIFY_PROT(xen_mmu_ptep_modify_prot_start); +DEFINE_XEN_MMU_PTEP_MODIFY_PROT(xen_mmu_ptep_modify_prot_commit); + +TRACE_EVENT(xen_mmu_alloc_ptpage, +	    TP_PROTO(struct mm_struct *mm, unsigned long pfn, unsigned level, bool pinned), +	    TP_ARGS(mm, pfn, level, pinned), +	    TP_STRUCT__entry( +		    __field(struct mm_struct *, mm) +		    __field(unsigned long, pfn) +		    __field(unsigned, level) +		    __field(bool, pinned) +		    ), +	    TP_fast_assign(__entry->mm = mm; +			   __entry->pfn = pfn; +			   __entry->level = level; +			   __entry->pinned = pinned), +	    TP_printk("mm %p  pfn %lx  level %d  %spinned", +		      __entry->mm, __entry->pfn, __entry->level, +		      __entry->pinned ? 
"" : "un") +	); + +TRACE_EVENT(xen_mmu_release_ptpage, +	    TP_PROTO(unsigned long pfn, unsigned level, bool pinned), +	    TP_ARGS(pfn, level, pinned), +	    TP_STRUCT__entry( +		    __field(unsigned long, pfn) +		    __field(unsigned, level) +		    __field(bool, pinned) +		    ), +	    TP_fast_assign(__entry->pfn = pfn; +			   __entry->level = level; +			   __entry->pinned = pinned), +	    TP_printk("pfn %lx  level %d  %spinned", +		      __entry->pfn, __entry->level, +		      __entry->pinned ? "" : "un") +	); + +DECLARE_EVENT_CLASS(xen_mmu_pgd, +	    TP_PROTO(struct mm_struct *mm, pgd_t *pgd), +	    TP_ARGS(mm, pgd), +	    TP_STRUCT__entry( +		    __field(struct mm_struct *, mm) +		    __field(pgd_t *, pgd) +		    ), +	    TP_fast_assign(__entry->mm = mm; +			   __entry->pgd = pgd), +	    TP_printk("mm %p pgd %p", __entry->mm, __entry->pgd) +	); +#define DEFINE_XEN_MMU_PGD_EVENT(name)				\ +	DEFINE_EVENT(xen_mmu_pgd, name,				\ +		TP_PROTO(struct mm_struct *mm, pgd_t *pgd),	\ +		     TP_ARGS(mm, pgd)) + +DEFINE_XEN_MMU_PGD_EVENT(xen_mmu_pgd_pin); +DEFINE_XEN_MMU_PGD_EVENT(xen_mmu_pgd_unpin); + +TRACE_EVENT(xen_mmu_flush_tlb_all, +	    TP_PROTO(int x), +	    TP_ARGS(x), +	    TP_STRUCT__entry(__array(char, x, 0)), +	    TP_fast_assign((void)x), +	    TP_printk("%s", "") +	); + +TRACE_EVENT(xen_mmu_flush_tlb, +	    TP_PROTO(int x), +	    TP_ARGS(x), +	    TP_STRUCT__entry(__array(char, x, 0)), +	    TP_fast_assign((void)x), +	    TP_printk("%s", "") +	); + +TRACE_EVENT(xen_mmu_flush_tlb_single, +	    TP_PROTO(unsigned long addr), +	    TP_ARGS(addr), +	    TP_STRUCT__entry( +		    __field(unsigned long, addr) +		    ), +	    TP_fast_assign(__entry->addr = addr), +	    TP_printk("addr %lx", __entry->addr) +	); + +TRACE_EVENT(xen_mmu_flush_tlb_others, +	    TP_PROTO(const struct cpumask *cpus, struct mm_struct *mm, +		     unsigned long addr, unsigned long end), +	    TP_ARGS(cpus, mm, addr, end), +	    TP_STRUCT__entry( +		    __field(unsigned, ncpus) +		    
__field(struct mm_struct *, mm) +		    __field(unsigned long, addr) +		    __field(unsigned long, end) +		    ), +	    TP_fast_assign(__entry->ncpus = cpumask_weight(cpus); +			   __entry->mm = mm; +			   __entry->addr = addr, +			   __entry->end = end), +	    TP_printk("ncpus %d mm %p addr %lx, end %lx", +		      __entry->ncpus, __entry->mm, __entry->addr, __entry->end) +	); + +TRACE_EVENT(xen_mmu_write_cr3, +	    TP_PROTO(bool kernel, unsigned long cr3), +	    TP_ARGS(kernel, cr3), +	    TP_STRUCT__entry( +		    __field(bool, kernel) +		    __field(unsigned long, cr3) +		    ), +	    TP_fast_assign(__entry->kernel = kernel; +			   __entry->cr3 = cr3), +	    TP_printk("%s cr3 %lx", +		      __entry->kernel ? "kernel" : "user", __entry->cr3) +	); + + +/* CPU */ +TRACE_EVENT(xen_cpu_write_ldt_entry, +	    TP_PROTO(struct desc_struct *dt, int entrynum, u64 desc), +	    TP_ARGS(dt, entrynum, desc), +	    TP_STRUCT__entry( +		    __field(struct desc_struct *, dt) +		    __field(int, entrynum) +		    __field(u64, desc) +		    ), +	    TP_fast_assign(__entry->dt = dt; +			   __entry->entrynum = entrynum; +			   __entry->desc = desc; +		    ), +	    TP_printk("dt %p  entrynum %d  entry %016llx", +		      __entry->dt, __entry->entrynum, +		      (unsigned long long)__entry->desc) +	); + +TRACE_EVENT(xen_cpu_write_idt_entry, +	    TP_PROTO(gate_desc *dt, int entrynum, const gate_desc *ent), +	    TP_ARGS(dt, entrynum, ent), +	    TP_STRUCT__entry( +		    __field(gate_desc *, dt) +		    __field(int, entrynum) +		    ), +	    TP_fast_assign(__entry->dt = dt; +			   __entry->entrynum = entrynum; +		    ), +	    TP_printk("dt %p  entrynum %d", +		      __entry->dt, __entry->entrynum) +	); + +TRACE_EVENT(xen_cpu_load_idt, +	    TP_PROTO(const struct desc_ptr *desc), +	    TP_ARGS(desc), +	    TP_STRUCT__entry( +		    __field(unsigned long, addr) +		    ), +	    TP_fast_assign(__entry->addr = desc->address), +	    TP_printk("addr %lx", __entry->addr) +	); + 
+TRACE_EVENT(xen_cpu_write_gdt_entry, +	    TP_PROTO(struct desc_struct *dt, int entrynum, const void *desc, int type), +	    TP_ARGS(dt, entrynum, desc, type), +	    TP_STRUCT__entry( +		    __field(u64, desc) +		    __field(struct desc_struct *, dt) +		    __field(int, entrynum) +		    __field(int, type) +		    ), +	    TP_fast_assign(__entry->dt = dt; +			   __entry->entrynum = entrynum; +			   __entry->desc = *(u64 *)desc; +			   __entry->type = type; +		    ), +	    TP_printk("dt %p  entrynum %d  type %d  desc %016llx", +		      __entry->dt, __entry->entrynum, __entry->type, +		      (unsigned long long)__entry->desc) +	); + +TRACE_EVENT(xen_cpu_set_ldt, +	    TP_PROTO(const void *addr, unsigned entries), +	    TP_ARGS(addr, entries), +	    TP_STRUCT__entry( +		    __field(const void *, addr) +		    __field(unsigned, entries) +		    ), +	    TP_fast_assign(__entry->addr = addr; +			   __entry->entries = entries), +	    TP_printk("addr %p  entries %u", +		      __entry->addr, __entry->entries) +	); + + +#endif /*  _TRACE_XEN_H */ + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h index a9377c0083a..26b4f2e1327 100644 --- a/include/trace/ftrace.h +++ b/include/trace/ftrace.h @@ -44,6 +44,12 @@  #undef __field_ext  #define __field_ext(type, item, filter_type)	type	item; +#undef __field_struct +#define __field_struct(type, item)	type	item; + +#undef __field_struct_ext +#define __field_struct_ext(type, item, filter_type)	type	item; +  #undef __array  #define __array(type, item, len)	type	item[len]; @@ -53,6 +59,9 @@  #undef __string  #define __string(item, src) __dynamic_array(char, item, -1) +#undef __bitmask +#define __bitmask(item, nr_bits) __dynamic_array(char, item, -1) +  #undef TP_STRUCT__entry  #define TP_STRUCT__entry(args...) 
args @@ -71,6 +80,10 @@  	static struct ftrace_event_call	__used		\  	__attribute__((__aligned__(4))) event_##name +#undef DEFINE_EVENT_FN +#define DEFINE_EVENT_FN(template, name, proto, args, reg, unreg)	\ +	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args)) +  #undef DEFINE_EVENT_PRINT  #define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\  	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args)) @@ -82,6 +95,14 @@  	TRACE_EVENT(name, PARAMS(proto), PARAMS(args),			\  		PARAMS(tstruct), PARAMS(assign), PARAMS(print))		\ +#undef TRACE_EVENT_FLAGS +#define TRACE_EVENT_FLAGS(name, value)					\ +	__TRACE_EVENT_FLAGS(name, value) + +#undef TRACE_EVENT_PERF_PERM +#define TRACE_EVENT_PERF_PERM(name, expr...)				\ +	__TRACE_EVENT_PERF_PERM(name, expr) +  #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) @@ -107,6 +128,12 @@  #undef __field_ext  #define __field_ext(type, item, filter_type) +#undef __field_struct +#define __field_struct(type, item) + +#undef __field_struct_ext +#define __field_struct_ext(type, item, filter_type) +  #undef __array  #define __array(type, item, len) @@ -116,6 +143,9 @@  #undef __string  #define __string(item, src) __dynamic_array(char, item, -1) +#undef __bitmask +#define __bitmask(item, nr_bits) __dynamic_array(unsigned long, item, -1) +  #undef DECLARE_EVENT_CLASS  #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\  	struct ftrace_data_offsets_##call {				\ @@ -129,6 +159,12 @@  #define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\  	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args)) +#undef TRACE_EVENT_FLAGS +#define TRACE_EVENT_FLAGS(event, flag) + +#undef TRACE_EVENT_PERF_PERM +#define TRACE_EVENT_PERF_PERM(event, expr...) 
+  #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)  /* @@ -179,9 +215,22 @@  #define __get_dynamic_array(field)	\  		((void *)__entry + (__entry->__data_loc_##field & 0xffff)) +#undef __get_dynamic_array_len +#define __get_dynamic_array_len(field)	\ +		((__entry->__data_loc_##field >> 16) & 0xffff) +  #undef __get_str  #define __get_str(field) (char *)__get_dynamic_array(field) +#undef __get_bitmask +#define __get_bitmask(field)						\ +	({								\ +		void *__bitmask = __get_dynamic_array(field);		\ +		unsigned int __bitmask_size;				\ +		__bitmask_size = __get_dynamic_array_len(field);	\ +		ftrace_print_bitmask_seq(p, __bitmask, __bitmask_size);	\ +	}) +  #undef __print_flags  #define __print_flags(flag, delim, flag_array...)			\  	({								\ @@ -198,6 +247,19 @@  		ftrace_print_symbols_seq(p, value, symbols);		\  	}) +#undef __print_symbolic_u64 +#if BITS_PER_LONG == 32 +#define __print_symbolic_u64(value, symbol_array...)			\ +	({								\ +		static const struct trace_print_flags_u64 symbols[] =	\ +			{ symbol_array, { -1, NULL } };			\ +		ftrace_print_symbols_seq_u64(p, value, symbols);	\ +	}) +#else +#define __print_symbolic_u64(value, symbol_array...)			
\ +			__print_symbolic(value, symbol_array) +#endif +  #undef __print_hex  #define __print_hex(buf, buf_len) ftrace_print_hex_seq(p, buf, buf_len) @@ -207,29 +269,18 @@ static notrace enum print_line_t					\  ftrace_raw_output_##call(struct trace_iterator *iter, int flags,	\  			 struct trace_event *trace_event)		\  {									\ -	struct ftrace_event_call *event;				\  	struct trace_seq *s = &iter->seq;				\ +	struct trace_seq __maybe_unused *p = &iter->tmp_seq;		\  	struct ftrace_raw_##call *field;				\ -	struct trace_entry *entry;					\ -	struct trace_seq *p = &iter->tmp_seq;				\  	int ret;							\  									\ -	event = container_of(trace_event, struct ftrace_event_call,	\ -			     event);					\ -									\ -	entry = iter->ent;						\ -									\ -	if (entry->type != event->event.type) {				\ -		WARN_ON_ONCE(1);					\ -		return TRACE_TYPE_UNHANDLED;				\ -	}								\ -									\ -	field = (typeof(field))entry;					\ +	field = (typeof(field))iter->ent;				\  									\ -	trace_seq_init(p);						\ -	ret = trace_seq_printf(s, "%s: ", event->name);			\ +	ret = ftrace_raw_output_prep(iter, trace_event);		\  	if (ret)							\ -		ret = trace_seq_printf(s, print);			\ +		return ret;						\ +									\ +	ret = trace_seq_printf(s, print);				\  	if (!ret)							\  		return TRACE_TYPE_PARTIAL_LINE;				\  									\ @@ -245,11 +296,9 @@ static notrace enum print_line_t					\  ftrace_raw_output_##call(struct trace_iterator *iter, int flags,	\  			 struct trace_event *event)			\  {									\ -	struct trace_seq *s = &iter->seq;				\  	struct ftrace_raw_##template *field;				\  	struct trace_entry *entry;					\  	struct trace_seq *p = &iter->tmp_seq;				\ -	int ret;							\  									\  	entry = iter->ent;						\  									\ @@ -261,13 +310,7 @@ ftrace_raw_output_##call(struct trace_iterator *iter, int flags,	\  	field = (typeof(field))entry;					\  									\  	trace_seq_init(p);						\ -	ret = trace_seq_printf(s, "%s: ", #call);			\ -	if (ret)							\ -		ret = 
trace_seq_printf(s, print);			\ -	if (!ret)							\ -		return TRACE_TYPE_PARTIAL_LINE;				\ -									\ -	return TRACE_TYPE_HANDLED;					\ +	return ftrace_output_call(iter, #call, print);			\  }									\  static struct trace_event_functions ftrace_event_type_funcs_##call = {	\  	.trace			= ftrace_raw_output_##call,		\ @@ -284,18 +327,33 @@ static struct trace_event_functions ftrace_event_type_funcs_##call = {	\  	if (ret)							\  		return ret; +#undef __field_struct_ext +#define __field_struct_ext(type, item, filter_type)			\ +	ret = trace_define_field(event_call, #type, #item,		\ +				 offsetof(typeof(field), item),		\ +				 sizeof(field.item),			\ +				 0, filter_type);			\ +	if (ret)							\ +		return ret; +  #undef __field  #define __field(type, item)	__field_ext(type, item, FILTER_OTHER) +#undef __field_struct +#define __field_struct(type, item) __field_struct_ext(type, item, FILTER_OTHER) +  #undef __array  #define __array(type, item, len)					\ -	BUILD_BUG_ON(len > MAX_FILTER_STR_VAL);				\ -	ret = trace_define_field(event_call, #type "[" #len "]", #item,	\ +	do {								\ +		char *type_str = #type"["__stringify(len)"]";		\ +		BUILD_BUG_ON(len > MAX_FILTER_STR_VAL);			\ +		ret = trace_define_field(event_call, type_str, #item,	\  				 offsetof(typeof(field), item),		\  				 sizeof(field.item),			\  				 is_signed_type(type), FILTER_OTHER);	\ -	if (ret)							\ -		return ret; +		if (ret)						\ +			return ret;					\ +	} while (0);  #undef __dynamic_array  #define __dynamic_array(type, item, len)				       \ @@ -307,9 +365,12 @@ static struct trace_event_functions ftrace_event_type_funcs_##call = {	\  #undef __string  #define __string(item, src) __dynamic_array(char, item, -1) +#undef __bitmask +#define __bitmask(item, nr_bits) __dynamic_array(unsigned long, item, -1) +  #undef DECLARE_EVENT_CLASS  #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, func, print)	\ -static int notrace							\ +static int notrace __init						\  
ftrace_define_fields_##call(struct ftrace_event_call *event_call)	\  {									\  	struct ftrace_raw_##call field;					\ @@ -342,18 +403,49 @@ ftrace_define_fields_##call(struct ftrace_event_call *event_call)	\  #undef __field_ext  #define __field_ext(type, item, filter_type) +#undef __field_struct +#define __field_struct(type, item) + +#undef __field_struct_ext +#define __field_struct_ext(type, item, filter_type) +  #undef __array  #define __array(type, item, len)  #undef __dynamic_array  #define __dynamic_array(type, item, len)				\ +	__item_length = (len) * sizeof(type);				\  	__data_offsets->item = __data_size +				\  			       offsetof(typeof(*entry), __data);	\ -	__data_offsets->item |= (len * sizeof(type)) << 16;		\ -	__data_size += (len) * sizeof(type); +	__data_offsets->item |= __item_length << 16;			\ +	__data_size += __item_length;  #undef __string -#define __string(item, src) __dynamic_array(char, item, strlen(src) + 1) +#define __string(item, src) __dynamic_array(char, item,			\ +		    strlen((src) ? (const char *)(src) : "(null)") + 1) + +/* + * __bitmask_size_in_bytes_raw is the number of bytes needed to hold + * num_possible_cpus(). + */ +#define __bitmask_size_in_bytes_raw(nr_bits)	\ +	(((nr_bits) + 7) / 8) + +#define __bitmask_size_in_longs(nr_bits)			\ +	((__bitmask_size_in_bytes_raw(nr_bits) +		\ +	  ((BITS_PER_LONG / 8) - 1)) / (BITS_PER_LONG / 8)) + +/* + * __bitmask_size_in_bytes is the number of bytes needed to hold + * num_possible_cpus() padded out to the nearest long. This is what + * is saved in the buffer, just to be consistent. 
+ */ +#define __bitmask_size_in_bytes(nr_bits)				\ +	(__bitmask_size_in_longs(nr_bits) * (BITS_PER_LONG / 8)) + +#undef __bitmask +#define __bitmask(item, nr_bits) __dynamic_array(unsigned long, item,	\ +					 __bitmask_size_in_longs(nr_bits))  #undef DECLARE_EVENT_CLASS  #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\ @@ -361,6 +453,7 @@ static inline notrace int ftrace_get_offsets_##call(			\  	struct ftrace_data_offsets_##call *__data_offsets, proto)       \  {									\  	int __data_size = 0;						\ +	int __maybe_unused __item_length;				\  	struct ftrace_raw_##call __maybe_unused *entry;			\  									\  	tstruct;							\ @@ -388,8 +481,11 @@ static inline notrace int ftrace_get_offsets_##call(			\   *   * static void ftrace_raw_event_<call>(void *__data, proto)   * { - *	struct ftrace_event_call *event_call = __data; + *	struct ftrace_event_file *ftrace_file = __data; + *	struct ftrace_event_call *event_call = ftrace_file->event_call;   *	struct ftrace_data_offsets_<call> __maybe_unused __data_offsets; + *	unsigned long eflags = ftrace_file->flags; + *	enum event_trigger_type __tt = ETT_NONE;   *	struct ring_buffer_event *event;   *	struct ftrace_raw_<call> *entry; <-- defined in stage 1   *	struct ring_buffer *buffer; @@ -397,12 +493,19 @@ static inline notrace int ftrace_get_offsets_##call(			\   *	int __data_size;   *	int pc;   * + *	if (!(eflags & FTRACE_EVENT_FL_TRIGGER_COND)) { + *		if (eflags & FTRACE_EVENT_FL_TRIGGER_MODE) + *			event_triggers_call(ftrace_file, NULL); + *		if (eflags & FTRACE_EVENT_FL_SOFT_DISABLED) + *			return; + *	} + *   *	local_save_flags(irq_flags);   *	pc = preempt_count();   *   *	__data_size = ftrace_get_offsets_<call>(&__data_offsets, args);   * - *	event = trace_current_buffer_lock_reserve(&buffer, + *	event = trace_event_buffer_lock_reserve(&buffer, ftrace_file,   *				  event_<call>->event.type,   *				  sizeof(*entry) + __data_size,   *				  irq_flags, pc); @@ -413,9 +516,17 @@ static inline 
notrace int ftrace_get_offsets_##call(			\   *	{ <assign>; }  <-- Here we assign the entries by the __field and   *			   __array macros.   * - *	if (!filter_current_check_discard(buffer, event_call, entry, event)) - *		trace_current_buffer_unlock_commit(buffer, - *						   event, irq_flags, pc); + *	if (eflags & FTRACE_EVENT_FL_TRIGGER_COND) + *		__tt = event_triggers_call(ftrace_file, entry); + * + *	if (test_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, + *		     &ftrace_file->flags)) + *		ring_buffer_discard_commit(buffer, event); + *	else if (!filter_check_discard(ftrace_file, entry, buffer, event)) + *		trace_buffer_unlock_commit(buffer, event, irq_flags, pc); + * + *	if (__tt) + *		event_triggers_post_call(ftrace_file, __tt);   * }   *   * static struct trace_event ftrace_event_type_<call> = { @@ -433,14 +544,19 @@ static inline notrace int ftrace_get_offsets_##call(			\   *	.reg			= ftrace_event_reg,   * };   * - * static struct ftrace_event_call __used - * __attribute__((__aligned__(4))) - * __attribute__((section("_ftrace_events"))) event_<call> = { - *	.name			= "<call>", + * static struct ftrace_event_call event_<call> = {   *	.class			= event_class_<template>, + *	{ + *		.tp			= &__tracepoint_<call>, + *	},   *	.event			= &ftrace_event_type_<call>,   *	.print_fmt		= print_fmt_<call>, + *	.flags			= TRACE_EVENT_FL_TRACEPOINT,   * }; + * // its only safe to use pointers when doing linker tricks to + * // create an array. 
+ * static struct ftrace_event_call __used + * __attribute__((section("_ftrace_events"))) *__event_<call> = &event_<call>;   *   */ @@ -464,6 +580,9 @@ static inline notrace int ftrace_get_offsets_##call(			\  #undef __field  #define __field(type, item) +#undef __field_struct +#define __field_struct(type, item) +  #undef __array  #define __array(type, item, len) @@ -472,17 +591,33 @@ static inline notrace int ftrace_get_offsets_##call(			\  	__entry->__data_loc_##item = __data_offsets.item;  #undef __string -#define __string(item, src) __dynamic_array(char, item, -1)       	\ +#define __string(item, src) __dynamic_array(char, item, -1)  #undef __assign_str  #define __assign_str(dst, src)						\ -	strcpy(__get_str(dst), src); +	strcpy(__get_str(dst), (src) ? (const char *)(src) : "(null)"); + +#undef __bitmask +#define __bitmask(item, nr_bits) __dynamic_array(unsigned long, item, -1) + +#undef __get_bitmask +#define __get_bitmask(field) (char *)__get_dynamic_array(field) + +#undef __assign_bitmask +#define __assign_bitmask(dst, src, nr_bits)					\ +	memcpy(__get_bitmask(dst), (src), __bitmask_size_in_bytes(nr_bits))  #undef TP_fast_assign  #define TP_fast_assign(args...) args -#undef TP_perf_assign -#define TP_perf_assign(args...) 
+#undef __perf_addr +#define __perf_addr(a)	(a) + +#undef __perf_count +#define __perf_count(c)	(c) + +#undef __perf_task +#define __perf_task(t)	(t)  #undef DECLARE_EVENT_CLASS  #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\ @@ -490,35 +625,28 @@ static inline notrace int ftrace_get_offsets_##call(			\  static notrace void							\  ftrace_raw_event_##call(void *__data, proto)				\  {									\ -	struct ftrace_event_call *event_call = __data;			\ +	struct ftrace_event_file *ftrace_file = __data;			\  	struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\ -	struct ring_buffer_event *event;				\ +	struct ftrace_event_buffer fbuffer;				\  	struct ftrace_raw_##call *entry;				\ -	struct ring_buffer *buffer;					\ -	unsigned long irq_flags;					\  	int __data_size;						\ -	int pc;								\  									\ -	local_save_flags(irq_flags);					\ -	pc = preempt_count();						\ +	if (ftrace_trigger_soft_disabled(ftrace_file))			\ +		return;							\  									\  	__data_size = ftrace_get_offsets_##call(&__data_offsets, args); \  									\ -	event = trace_current_buffer_lock_reserve(&buffer,		\ -				 event_call->event.type,		\ -				 sizeof(*entry) + __data_size,		\ -				 irq_flags, pc);			\ -	if (!event)							\ +	entry = ftrace_event_buffer_reserve(&fbuffer, ftrace_file,	\ +				 sizeof(*entry) + __data_size);		\ +									\ +	if (!entry)							\  		return;							\ -	entry	= ring_buffer_event_data(event);			\  									\  	tstruct								\  									\  	{ assign; }							\  									\ -	if (!filter_current_check_discard(buffer, event_call, entry, event)) \ -		trace_nowake_buffer_unlock_commit(buffer,		\ -						  event, irq_flags, pc); \ +	ftrace_event_buffer_commit(&fbuffer);				\  }  /*   * The ftrace_test_probe is compiled out, it is only here as a build time check @@ -543,8 +671,11 @@ static inline void ftrace_test_probe_##call(void)			\  #undef __print_flags  #undef __print_symbolic +#undef __print_hex  #undef __get_dynamic_array +#undef 
__get_dynamic_array_len  #undef __get_str +#undef __get_bitmask  #undef TP_printk  #define TP_printk(fmt, args...) "\"" fmt "\", "  __stringify(args) @@ -553,7 +684,7 @@ static inline void ftrace_test_probe_##call(void)			\  #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\  _TRACE_PERF_PROTO(call, PARAMS(proto));					\  static const char print_fmt_##call[] = print;				\ -static struct ftrace_event_class __used event_class_##call = {		\ +static struct ftrace_event_class __used __refdata event_class_##call = { \  	.system			= __stringify(TRACE_SYSTEM),		\  	.define_fields		= ftrace_define_fields_##call,		\  	.fields			= LIST_HEAD_INIT(event_class_##call.fields),\ @@ -566,104 +697,37 @@ static struct ftrace_event_class __used event_class_##call = {		\  #undef DEFINE_EVENT  #define DEFINE_EVENT(template, call, proto, args)			\  									\ -static struct ftrace_event_call __used					\ -__attribute__((__aligned__(4)))						\ -__attribute__((section("_ftrace_events"))) event_##call = {		\ -	.name			= #call,				\ +static struct ftrace_event_call __used event_##call = {			\  	.class			= &event_class_##template,		\ +	{								\ +		.tp			= &__tracepoint_##call,		\ +	},								\  	.event.funcs		= &ftrace_event_type_funcs_##template,	\  	.print_fmt		= print_fmt_##template,			\ -}; +	.flags			= TRACE_EVENT_FL_TRACEPOINT,		\ +};									\ +static struct ftrace_event_call __used					\ +__attribute__((section("_ftrace_events"))) *__event_##call = &event_##call  #undef DEFINE_EVENT_PRINT  #define DEFINE_EVENT_PRINT(template, call, proto, args, print)		\  									\  static const char print_fmt_##call[] = print;				\  									\ -static struct ftrace_event_call __used					\ -__attribute__((__aligned__(4)))						\ -__attribute__((section("_ftrace_events"))) event_##call = {		\ -	.name			= #call,				\ +static struct ftrace_event_call __used event_##call = {			\  	.class			= &event_class_##template,		\ +	{								\ +		.tp			= &__tracepoint_##call,		\ +	},								
\  	.event.funcs		= &ftrace_event_type_funcs_##call,	\  	.print_fmt		= print_fmt_##call,			\ -} +	.flags			= TRACE_EVENT_FL_TRACEPOINT,		\ +};									\ +static struct ftrace_event_call __used					\ +__attribute__((section("_ftrace_events"))) *__event_##call = &event_##call  #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) -/* - * Define the insertion callback to perf events - * - * The job is very similar to ftrace_raw_event_<call> except that we don't - * insert in the ring buffer but in a perf counter. - * - * static void ftrace_perf_<call>(proto) - * { - *	struct ftrace_data_offsets_<call> __maybe_unused __data_offsets; - *	struct ftrace_event_call *event_call = &event_<call>; - *	extern void perf_tp_event(int, u64, u64, void *, int); - *	struct ftrace_raw_##call *entry; - *	struct perf_trace_buf *trace_buf; - *	u64 __addr = 0, __count = 1; - *	unsigned long irq_flags; - *	struct trace_entry *ent; - *	int __entry_size; - *	int __data_size; - *	int __cpu - *	int pc; - * - *	pc = preempt_count(); - * - *	__data_size = ftrace_get_offsets_<call>(&__data_offsets, args); - * - *	// Below we want to get the aligned size by taking into account - *	// the u32 field that will later store the buffer size - *	__entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32), - *			     sizeof(u64)); - *	__entry_size -= sizeof(u32); - * - *	// Protect the non nmi buffer - *	// This also protects the rcu read side - *	local_irq_save(irq_flags); - *	__cpu = smp_processor_id(); - * - *	if (in_nmi()) - *		trace_buf = rcu_dereference_sched(perf_trace_buf_nmi); - *	else - *		trace_buf = rcu_dereference_sched(perf_trace_buf); - * - *	if (!trace_buf) - *		goto end; - * - *	trace_buf = per_cpu_ptr(trace_buf, __cpu); - * - * 	// Avoid recursion from perf that could mess up the buffer - * 	if (trace_buf->recursion++) - *		goto end_recursion; - * - * 	raw_data = trace_buf->buf; - * - *	// Make recursion update visible before entering perf_tp_event - *	// so that we protect from perf 
recursions. - * - *	barrier(); - * - *	//zero dead bytes from alignment to avoid stack leak to userspace: - *	*(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL; - *	entry = (struct ftrace_raw_<call> *)raw_data; - *	ent = &entry->ent; - *	tracing_generic_entry_update(ent, irq_flags, pc); - *	ent->type = event_call->id; - * - *	<tstruct> <- do some jobs with dynamic arrays - * - *	<assign>  <- affect our values - * - *	perf_tp_event(event_call->id, __addr, __count, entry, - *		     __entry_size);  <- submit them to perf counter - * - * } - */  #ifdef CONFIG_PERF_EVENTS @@ -674,14 +738,24 @@ __attribute__((section("_ftrace_events"))) event_##call = {		\  #define __get_dynamic_array(field)	\  		((void *)__entry + (__entry->__data_loc_##field & 0xffff)) +#undef __get_dynamic_array_len +#define __get_dynamic_array_len(field)	\ +		((__entry->__data_loc_##field >> 16) & 0xffff) +  #undef __get_str  #define __get_str(field) (char *)__get_dynamic_array(field) +#undef __get_bitmask +#define __get_bitmask(field) (char *)__get_dynamic_array(field) +  #undef __perf_addr -#define __perf_addr(a) __addr = (a) +#define __perf_addr(a)	(__addr = (a))  #undef __perf_count -#define __perf_count(c) __count = (c) +#define __perf_count(c)	(__count = (c)) + +#undef __perf_task +#define __perf_task(t)	(__task = (t))  #undef DECLARE_EVENT_CLASS  #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\ @@ -693,24 +767,26 @@ perf_trace_##call(void *__data, proto)					\  	struct ftrace_raw_##call *entry;				\  	struct pt_regs __regs;						\  	u64 __addr = 0, __count = 1;					\ +	struct task_struct *__task = NULL;				\  	struct hlist_head *head;					\  	int __entry_size;						\  	int __data_size;						\  	int rctx;							\  									\ -	perf_fetch_caller_regs(&__regs);				\ -									\  	__data_size = ftrace_get_offsets_##call(&__data_offsets, args); \ +									\ +	head = this_cpu_ptr(event_call->perf_events);			\ +	if (__builtin_constant_p(!__task) && !__task &&			\ +				
hlist_empty(head))			\ +		return;							\ +									\  	__entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),\  			     sizeof(u64));				\  	__entry_size -= sizeof(u32);					\  									\ -	if (WARN_ONCE(__entry_size > PERF_MAX_TRACE_SIZE,		\ -		      "profile buffer not large enough"))		\ -		return;							\ -									\ -	entry = (struct ftrace_raw_##call *)perf_trace_buf_prepare(	\ -		__entry_size, event_call->event.type, &__regs, &rctx);	\ +	perf_fetch_caller_regs(&__regs);				\ +	entry = perf_trace_buf_prepare(__entry_size,			\ +			event_call->event.type, &__regs, &rctx);	\  	if (!entry)							\  		return;							\  									\ @@ -718,9 +794,8 @@ perf_trace_##call(void *__data, proto)					\  									\  	{ assign; }							\  									\ -	head = this_cpu_ptr(event_call->perf_events);			\  	perf_trace_buf_submit(entry, __entry_size, rctx, __addr,	\ -		__count, &__regs, head);				\ +		__count, &__regs, head, __task);			\  }  /* @@ -743,5 +818,3 @@ static inline void perf_test_probe_##call(void)				\  #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)  #endif /* CONFIG_PERF_EVENTS */ -#undef _TRACE_PROFILE_INIT - diff --git a/include/trace/syscall.h b/include/trace/syscall.h index 31966a4fb8c..9674145e2f6 100644 --- a/include/trace/syscall.h +++ b/include/trace/syscall.h @@ -4,6 +4,7 @@  #include <linux/tracepoint.h>  #include <linux/unistd.h>  #include <linux/ftrace_event.h> +#include <linux/thread_info.h>  #include <asm/ptrace.h> @@ -16,6 +17,7 @@   * @nb_args: number of parameters it takes   * @types: list of types as strings   * @args: list of args as strings (args[i] matches types[i]) + * @enter_fields: list of fields for syscall_enter trace event   * @enter_event: associated syscall_enter trace event   * @exit_event: associated syscall_exit trace event   */ @@ -31,27 +33,18 @@ struct syscall_metadata {  	struct ftrace_event_call *exit_event;  }; -#ifdef CONFIG_FTRACE_SYSCALLS -extern unsigned long arch_syscall_addr(int nr); -extern int 
init_syscall_trace(struct ftrace_event_call *call); - -extern int reg_event_syscall_enter(struct ftrace_event_call *call); -extern void unreg_event_syscall_enter(struct ftrace_event_call *call); -extern int reg_event_syscall_exit(struct ftrace_event_call *call); -extern void unreg_event_syscall_exit(struct ftrace_event_call *call); -extern int -ftrace_format_syscall(struct ftrace_event_call *call, struct trace_seq *s); -enum print_line_t print_syscall_enter(struct trace_iterator *iter, int flags, -				      struct trace_event *event); -enum print_line_t print_syscall_exit(struct trace_iterator *iter, int flags, -				     struct trace_event *event); -#endif - -#ifdef CONFIG_PERF_EVENTS -int perf_sysenter_enable(struct ftrace_event_call *call); -void perf_sysenter_disable(struct ftrace_event_call *call); -int perf_sysexit_enable(struct ftrace_event_call *call); -void perf_sysexit_disable(struct ftrace_event_call *call); +#if defined(CONFIG_TRACEPOINTS) && defined(CONFIG_HAVE_SYSCALL_TRACEPOINTS) +static inline void syscall_tracepoint_update(struct task_struct *p) +{ +	if (test_thread_flag(TIF_SYSCALL_TRACEPOINT)) +		set_tsk_thread_flag(p, TIF_SYSCALL_TRACEPOINT); +	else +		clear_tsk_thread_flag(p, TIF_SYSCALL_TRACEPOINT); +} +#else +static inline void syscall_tracepoint_update(struct task_struct *p) +{ +}  #endif  #endif /* _TRACE_SYSCALL_H */  | 
