Diffstat (limited to 'include/trace/events/btrfs.h')
-rw-r--r--   include/trace/events/btrfs.h   309
1 file changed, 283 insertions, 26 deletions
diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h
index 84f3001a568..4ee4e30d26d 100644
--- a/include/trace/events/btrfs.h
+++ b/include/trace/events/btrfs.h
@@ -6,6 +6,7 @@
 
 #include <linux/writeback.h>
 #include <linux/tracepoint.h>
+#include <trace/events/gfpflags.h>
 
 struct btrfs_root;
 struct btrfs_fs_info;
@@ -20,6 +21,8 @@ struct btrfs_block_group_cache;
 struct btrfs_free_cluster;
 struct map_lookup;
 struct extent_buffer;
+struct btrfs_work;
+struct __btrfs_workqueue;
 
 #define show_ref_type(type)						\
 	__print_symbolic(type,						\
@@ -39,21 +42,26 @@ struct extent_buffer;
 		{ BTRFS_ROOT_TREE_DIR_OBJECTID, "ROOT_TREE_DIR" },	\
 		{ BTRFS_CSUM_TREE_OBJECTID,	"CSUM_TREE" },		\
 		{ BTRFS_TREE_LOG_OBJECTID,	"TREE_LOG" },		\
+		{ BTRFS_QUOTA_TREE_OBJECTID,	"QUOTA_TREE" },		\
 		{ BTRFS_TREE_RELOC_OBJECTID,	"TREE_RELOC" },		\
+		{ BTRFS_UUID_TREE_OBJECTID,	"UUID_RELOC" },		\
 		{ BTRFS_DATA_RELOC_TREE_OBJECTID, "DATA_RELOC_TREE" })
 
 #define show_root_type(obj)						\
 	obj, ((obj >= BTRFS_DATA_RELOC_TREE_OBJECTID) ||		\
-	      (obj <= BTRFS_CSUM_TREE_OBJECTID )) ? __show_root_type(obj) : "-"
+	      (obj >= BTRFS_ROOT_TREE_OBJECTID &&			\
+	       obj <= BTRFS_QUOTA_TREE_OBJECTID)) ? __show_root_type(obj) : "-"
 
 #define BTRFS_GROUP_FLAGS	\
-	{ BTRFS_BLOCK_GROUP_DATA,	"DATA"}, \
-	{ BTRFS_BLOCK_GROUP_SYSTEM,	"SYSTEM"}, \
-	{ BTRFS_BLOCK_GROUP_METADATA,	"METADATA"}, \
-	{ BTRFS_BLOCK_GROUP_RAID0,	"RAID0"}, \
-	{ BTRFS_BLOCK_GROUP_RAID1,	"RAID1"}, \
-	{ BTRFS_BLOCK_GROUP_DUP,	"DUP"}, \
-	{ BTRFS_BLOCK_GROUP_RAID10,	"RAID10"}
+	{ BTRFS_BLOCK_GROUP_DATA,	"DATA"},	\
+	{ BTRFS_BLOCK_GROUP_SYSTEM,	"SYSTEM"},	\
+	{ BTRFS_BLOCK_GROUP_METADATA,	"METADATA"},	\
+	{ BTRFS_BLOCK_GROUP_RAID0,	"RAID0"},	\
+	{ BTRFS_BLOCK_GROUP_RAID1,	"RAID1"},	\
+	{ BTRFS_BLOCK_GROUP_DUP,	"DUP"},		\
+	{ BTRFS_BLOCK_GROUP_RAID10,	"RAID10"},	\
+	{ BTRFS_BLOCK_GROUP_RAID5,	"RAID5"},	\
+	{ BTRFS_BLOCK_GROUP_RAID6,	"RAID6"}
 
 #define BTRFS_UUID_SIZE 16
 
@@ -152,14 +160,18 @@ DEFINE_EVENT(btrfs__inode, btrfs_inode_evict,
 		{ EXTENT_FLAG_PINNED,		"PINNED" },	\
 		{ EXTENT_FLAG_COMPRESSED,	"COMPRESSED" },	\
 		{ EXTENT_FLAG_VACANCY,		"VACANCY" },	\
-		{ EXTENT_FLAG_PREALLOC,		"PREALLOC" })
+		{ EXTENT_FLAG_PREALLOC,		"PREALLOC" },	\
+		{ EXTENT_FLAG_LOGGING,		"LOGGING" },	\
+		{ EXTENT_FLAG_FILLING,		"FILLING" })
 
-TRACE_EVENT(btrfs_get_extent,
+TRACE_EVENT_CONDITION(btrfs_get_extent,
 
 	TP_PROTO(struct btrfs_root *root, struct extent_map *map),
 
 	TP_ARGS(root, map),
 
+	TP_CONDITION(map),
+
 	TP_STRUCT__entry(
 		__field( u64, root_objectid )
 		__field( u64, start )
@@ -198,14 +210,19 @@ TRACE_EVENT(btrfs_get_extent,
 		  __entry->refs, __entry->compress_type)
 );
 
-#define show_ordered_flags(flags) \
-	__print_symbolic(flags, \
-		{ BTRFS_ORDERED_IO_DONE, "IO_DONE" }, \
-		{ BTRFS_ORDERED_COMPLETE, "COMPLETE" }, \
-		{ BTRFS_ORDERED_NOCOW, "NOCOW" }, \
-		{ BTRFS_ORDERED_COMPRESSED, "COMPRESSED" }, \
-		{ BTRFS_ORDERED_PREALLOC, "PREALLOC" }, \
-		{ BTRFS_ORDERED_DIRECT, "DIRECT" })
+#define show_ordered_flags(flags)					\
+	__print_flags(flags, "|",					\
+		{ (1 << BTRFS_ORDERED_IO_DONE), "IO_DONE" },		\
+		{ (1 << BTRFS_ORDERED_COMPLETE), "COMPLETE" },		\
+		{ (1 << BTRFS_ORDERED_NOCOW), "NOCOW" },		\
+		{ (1 << BTRFS_ORDERED_COMPRESSED), "COMPRESSED" },	\
+		{ (1 << BTRFS_ORDERED_PREALLOC), "PREALLOC" },		\
+		{ (1 << BTRFS_ORDERED_DIRECT), "DIRECT" },		\
+		{ (1 << BTRFS_ORDERED_IOERR), "IOERR" },		\
+		{ (1 << BTRFS_ORDERED_UPDATED_ISIZE), "UPDATED_ISIZE" }, \
+		{ (1 << BTRFS_ORDERED_LOGGED_CSUM), "LOGGED_CSUM" },	\
+		{ (1 << BTRFS_ORDERED_TRUNCATED), "TRUNCATED" })
+
 
 DECLARE_EVENT_CLASS(btrfs__ordered_extent,
 
@@ -428,7 +445,7 @@ TRACE_EVENT(btrfs_sync_fs,
 		{ BTRFS_UPDATE_DELAYED_HEAD, "UPDATE_DELAYED_HEAD" })
 
-TRACE_EVENT(btrfs_delayed_tree_ref,
+DECLARE_EVENT_CLASS(btrfs_delayed_tree_ref,
 
 	TP_PROTO(struct btrfs_delayed_ref_node *ref,
 		 struct btrfs_delayed_tree_ref *full_ref,
@@ -444,6 +461,7 @@ TRACE_EVENT(btrfs_delayed_tree_ref,
 		__field( u64, ref_root )
 		__field( int, level )
 		__field( int, type )
+		__field( u64, seq )
 	),
 
 	TP_fast_assign(
@@ -454,20 +472,40 @@ TRACE_EVENT(btrfs_delayed_tree_ref,
 		__entry->ref_root	= full_ref->root;
 		__entry->level		= full_ref->level;
 		__entry->type		= ref->type;
+		__entry->seq		= ref->seq;
 	),
 
 	TP_printk("bytenr = %llu, num_bytes = %llu, action = %s, "
 		  "parent = %llu(%s), ref_root = %llu(%s), level = %d, "
-		  "type = %s",
+		  "type = %s, seq = %llu",
 		  (unsigned long long)__entry->bytenr,
 		  (unsigned long long)__entry->num_bytes,
 		  show_ref_action(__entry->action),
 		  show_root_type(__entry->parent),
 		  show_root_type(__entry->ref_root),
-		  __entry->level, show_ref_type(__entry->type))
+		  __entry->level, show_ref_type(__entry->type),
+		  (unsigned long long)__entry->seq)
+);
+
+DEFINE_EVENT(btrfs_delayed_tree_ref, add_delayed_tree_ref,
+
+	TP_PROTO(struct btrfs_delayed_ref_node *ref,
+		 struct btrfs_delayed_tree_ref *full_ref,
+		 int action),
+
+	TP_ARGS(ref, full_ref, action)
 );
 
-TRACE_EVENT(btrfs_delayed_data_ref,
+DEFINE_EVENT(btrfs_delayed_tree_ref, run_delayed_tree_ref,
+
+	TP_PROTO(struct btrfs_delayed_ref_node *ref,
+		 struct btrfs_delayed_tree_ref *full_ref,
+		 int action),
+
+	TP_ARGS(ref, full_ref, action)
+);
+
+DECLARE_EVENT_CLASS(btrfs_delayed_data_ref,
 
 	TP_PROTO(struct btrfs_delayed_ref_node *ref,
 		 struct btrfs_delayed_data_ref *full_ref,
@@ -484,6 +522,7 @@ TRACE_EVENT(btrfs_delayed_data_ref,
 		__field( u64, owner )
 		__field( u64, offset )
 		__field( int, type )
+		__field( u64, seq )
 	),
 
 	TP_fast_assign(
@@ -495,11 +534,12 @@ TRACE_EVENT(btrfs_delayed_data_ref,
 		__entry->owner		= full_ref->objectid;
 		__entry->offset		= full_ref->offset;
 		__entry->type		= ref->type;
+		__entry->seq		= ref->seq;
 	),
 
 	TP_printk("bytenr = %llu, num_bytes = %llu, action = %s, "
 		  "parent = %llu(%s), ref_root = %llu(%s), owner = %llu, "
-		  "offset = %llu, type = %s",
+		  "offset = %llu, type = %s, seq = %llu",
 		  (unsigned long long)__entry->bytenr,
 		  (unsigned long long)__entry->num_bytes,
 		  show_ref_action(__entry->action),
@@ -507,10 +547,29 @@ TRACE_EVENT(btrfs_delayed_data_ref,
 		  show_root_type(__entry->ref_root),
 		  (unsigned long long)__entry->owner,
 		  (unsigned long long)__entry->offset,
-		  show_ref_type(__entry->type))
+		  show_ref_type(__entry->type),
+		  (unsigned long long)__entry->seq)
 );
 
-TRACE_EVENT(btrfs_delayed_ref_head,
+DEFINE_EVENT(btrfs_delayed_data_ref, add_delayed_data_ref,
+
+	TP_PROTO(struct btrfs_delayed_ref_node *ref,
+		 struct btrfs_delayed_data_ref *full_ref,
+		 int action),
+
+	TP_ARGS(ref, full_ref, action)
+);
+
+DEFINE_EVENT(btrfs_delayed_data_ref, run_delayed_data_ref,
+
+	TP_PROTO(struct btrfs_delayed_ref_node *ref,
+		 struct btrfs_delayed_data_ref *full_ref,
+		 int action),
+
+	TP_ARGS(ref, full_ref, action)
+);
+
+DECLARE_EVENT_CLASS(btrfs_delayed_ref_head,
 
 	TP_PROTO(struct btrfs_delayed_ref_node *ref,
 		 struct btrfs_delayed_ref_head *head_ref,
@@ -539,6 +598,24 @@ TRACE_EVENT(btrfs_delayed_ref_head,
 		  __entry->is_data)
 );
 
+DEFINE_EVENT(btrfs_delayed_ref_head, add_delayed_ref_head,
+
+	TP_PROTO(struct btrfs_delayed_ref_node *ref,
+		 struct btrfs_delayed_ref_head *head_ref,
+		 int action),
+
+	TP_ARGS(ref, head_ref, action)
+);
+
+DEFINE_EVENT(btrfs_delayed_ref_head, run_delayed_ref_head,
+
+	TP_PROTO(struct btrfs_delayed_ref_node *ref,
+		 struct btrfs_delayed_ref_head *head_ref,
+		 int action),
+
+	TP_ARGS(ref, head_ref, action)
+);
+
 #define show_chunk_type(type)					\
 	__print_flags(type, "|",				\
 		{ BTRFS_BLOCK_GROUP_DATA,	"DATA" },	\
@@ -547,7 +624,9 @@ TRACE_EVENT(btrfs_delayed_ref_head,
 		{ BTRFS_BLOCK_GROUP_RAID0,	"RAID0" },	\
 		{ BTRFS_BLOCK_GROUP_RAID1,	"RAID1" },	\
 		{ BTRFS_BLOCK_GROUP_DUP,	"DUP" },	\
-		{ BTRFS_BLOCK_GROUP_RAID10,	"RAID10"})
+		{ BTRFS_BLOCK_GROUP_RAID10,	"RAID10"},	\
+		{ BTRFS_BLOCK_GROUP_RAID5,	"RAID5" },	\
+		{ BTRFS_BLOCK_GROUP_RAID6,	"RAID6" })
 
 DECLARE_EVENT_CLASS(btrfs__chunk,
 
@@ -862,6 +941,184 @@ TRACE_EVENT(btrfs_setup_cluster,
 		  __entry->size, __entry->max_size, __entry->bitmap)
 );
 
+struct extent_state;
+TRACE_EVENT(alloc_extent_state,
+
+	TP_PROTO(struct extent_state *state, gfp_t mask, unsigned long IP),
+
+	TP_ARGS(state, mask, IP),
+
+	TP_STRUCT__entry(
+		__field(struct extent_state *, state)
+		__field(gfp_t, mask)
+		__field(unsigned long, ip)
+	),
+
+	TP_fast_assign(
+		__entry->state	= state,
+		__entry->mask	= mask,
+		__entry->ip	= IP
+	),
+
+	TP_printk("state=%p; mask = %s; caller = %pF", __entry->state,
+		  show_gfp_flags(__entry->mask), (void *)__entry->ip)
+);
+
+TRACE_EVENT(free_extent_state,
+
+	TP_PROTO(struct extent_state *state, unsigned long IP),
+
+	TP_ARGS(state, IP),
+
+	TP_STRUCT__entry(
+		__field(struct extent_state *, state)
+		__field(unsigned long, ip)
+	),
+
+	TP_fast_assign(
+		__entry->state = state,
+		__entry->ip = IP
+	),
+
+	TP_printk(" state=%p; caller = %pF", __entry->state,
+		  (void *)__entry->ip)
+);
+
+DECLARE_EVENT_CLASS(btrfs__work,
+
+	TP_PROTO(struct btrfs_work *work),
+
+	TP_ARGS(work),
+
+	TP_STRUCT__entry(
+		__field( void *, work )
+		__field( void *, wq )
+		__field( void *, func )
+		__field( void *, ordered_func )
+		__field( void *, ordered_free )
+	),
+
+	TP_fast_assign(
+		__entry->work		= work;
+		__entry->wq		= work->wq;
+		__entry->func		= work->func;
+		__entry->ordered_func	= work->ordered_func;
+		__entry->ordered_free	= work->ordered_free;
+	),
+
+	TP_printk("work=%p, wq=%p, func=%p, ordered_func=%p, ordered_free=%p",
+		  __entry->work, __entry->wq, __entry->func,
+		  __entry->ordered_func, __entry->ordered_free)
+);
+
+/* For situiations that the work is freed */
+DECLARE_EVENT_CLASS(btrfs__work__done,
+
+	TP_PROTO(struct btrfs_work *work),
+
+	TP_ARGS(work),
+
+	TP_STRUCT__entry(
+		__field( void *, work )
+	),
+
+	TP_fast_assign(
+		__entry->work = work;
+	),
+
+	TP_printk("work->%p", __entry->work)
+);
+
+DEFINE_EVENT(btrfs__work, btrfs_work_queued,
+
+	TP_PROTO(struct btrfs_work *work),
+
+	TP_ARGS(work)
+);
+
+DEFINE_EVENT(btrfs__work, btrfs_work_sched,
+
+	TP_PROTO(struct btrfs_work *work),
+
+	TP_ARGS(work)
+);
+
+DEFINE_EVENT(btrfs__work, btrfs_normal_work_done,
+
+	TP_PROTO(struct btrfs_work *work),
+
+	TP_ARGS(work)
+);
+
+DEFINE_EVENT(btrfs__work__done, btrfs_all_work_done,
+
+	TP_PROTO(struct btrfs_work *work),
+
+	TP_ARGS(work)
+);
+
+DEFINE_EVENT(btrfs__work, btrfs_ordered_sched,
+
+	TP_PROTO(struct btrfs_work *work),
+
+	TP_ARGS(work)
+);
+
+DECLARE_EVENT_CLASS(btrfs__workqueue,
+
+	TP_PROTO(struct __btrfs_workqueue *wq, const char *name, int high),
+
+	TP_ARGS(wq, name, high),
+
+	TP_STRUCT__entry(
+		__field( void *, wq )
+		__string( name, name )
+		__field( int, high )
+	),
+
+	TP_fast_assign(
+		__entry->wq = wq;
+		__assign_str(name, name);
+		__entry->high = high;
+	),
+
+	TP_printk("name=%s%s, wq=%p", __get_str(name),
+		  __print_flags(__entry->high, "",
+				{(WQ_HIGHPRI), "-high"}),
+		  __entry->wq)
+);
+
+DEFINE_EVENT(btrfs__workqueue, btrfs_workqueue_alloc,
+
+	TP_PROTO(struct __btrfs_workqueue *wq, const char *name, int high),
+
+	TP_ARGS(wq, name, high)
+);
+
+DECLARE_EVENT_CLASS(btrfs__workqueue_done,
+
+	TP_PROTO(struct __btrfs_workqueue *wq),
+
+	TP_ARGS(wq),
+
+	TP_STRUCT__entry(
+		__field( void *, wq )
+	),
+
+	TP_fast_assign(
+		__entry->wq = wq;
+	),
+
+	TP_printk("wq=%p", __entry->wq)
+);
+
+DEFINE_EVENT(btrfs__workqueue_done, btrfs_workqueue_destroy,
+
+	TP_PROTO(struct __btrfs_workqueue *wq),
+
+	TP_ARGS(wq)
+);
+
 #endif /* _TRACE_BTRFS_H */
 
 /* This part must be outside protection */
