Diffstat (limited to 'include/trace/events/btrfs.h')
-rw-r--r--	include/trace/events/btrfs.h	| 508
1 file changed, 483 insertions, 25 deletions
diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h
index f445cff66ab..4ee4e30d26d 100644
--- a/include/trace/events/btrfs.h
+++ b/include/trace/events/btrfs.h
@@ -6,6 +6,7 @@
 
 #include <linux/writeback.h>
 #include <linux/tracepoint.h>
+#include <trace/events/gfpflags.h>
 
 struct btrfs_root;
 struct btrfs_fs_info;
@@ -16,8 +17,12 @@ struct btrfs_delayed_ref_node;
 struct btrfs_delayed_tree_ref;
 struct btrfs_delayed_data_ref;
 struct btrfs_delayed_ref_head;
+struct btrfs_block_group_cache;
+struct btrfs_free_cluster;
 struct map_lookup;
 struct extent_buffer;
+struct btrfs_work;
+struct __btrfs_workqueue;
 
 #define show_ref_type(type) \
 	__print_symbolic(type, \
@@ -28,7 +33,7 @@ struct extent_buffer;
 		{ BTRFS_SHARED_DATA_REF_KEY, "SHARED_DATA_REF" })
 
 #define __show_root_type(obj) \
-	__print_symbolic(obj, \
+	__print_symbolic_u64(obj, \
 		{ BTRFS_ROOT_TREE_OBJECTID, "ROOT_TREE" }, \
 		{ BTRFS_EXTENT_TREE_OBJECTID, "EXTENT_TREE" }, \
 		{ BTRFS_CHUNK_TREE_OBJECTID, "CHUNK_TREE" }, \
@@ -37,12 +42,28 @@ struct extent_buffer;
 		{ BTRFS_ROOT_TREE_DIR_OBJECTID, "ROOT_TREE_DIR" }, \
 		{ BTRFS_CSUM_TREE_OBJECTID, "CSUM_TREE" }, \
 		{ BTRFS_TREE_LOG_OBJECTID, "TREE_LOG" }, \
+		{ BTRFS_QUOTA_TREE_OBJECTID, "QUOTA_TREE" }, \
 		{ BTRFS_TREE_RELOC_OBJECTID, "TREE_RELOC" }, \
+		{ BTRFS_UUID_TREE_OBJECTID, "UUID_RELOC" }, \
 		{ BTRFS_DATA_RELOC_TREE_OBJECTID, "DATA_RELOC_TREE" })
 
 #define show_root_type(obj) \
 	obj, ((obj >= BTRFS_DATA_RELOC_TREE_OBJECTID) || \
-	      (obj <= BTRFS_CSUM_TREE_OBJECTID )) ? __show_root_type(obj) : "-"
+	      (obj >= BTRFS_ROOT_TREE_OBJECTID && \
+	       obj <= BTRFS_QUOTA_TREE_OBJECTID)) ? __show_root_type(obj) : "-"
+
+#define BTRFS_GROUP_FLAGS \
+	{ BTRFS_BLOCK_GROUP_DATA, "DATA"}, \
+	{ BTRFS_BLOCK_GROUP_SYSTEM, "SYSTEM"}, \
+	{ BTRFS_BLOCK_GROUP_METADATA, "METADATA"}, \
+	{ BTRFS_BLOCK_GROUP_RAID0, "RAID0"}, \
+	{ BTRFS_BLOCK_GROUP_RAID1, "RAID1"}, \
+	{ BTRFS_BLOCK_GROUP_DUP, "DUP"}, \
+	{ BTRFS_BLOCK_GROUP_RAID10, "RAID10"}, \
+	{ BTRFS_BLOCK_GROUP_RAID5, "RAID5"}, \
+	{ BTRFS_BLOCK_GROUP_RAID6, "RAID6"}
+
+#define BTRFS_UUID_SIZE 16
 
 TRACE_EVENT(btrfs_transaction_commit,
 
@@ -125,7 +146,7 @@ DEFINE_EVENT(btrfs__inode, btrfs_inode_evict,
 );
 
 #define __show_map_type(type) \
-	__print_symbolic(type, \
+	__print_symbolic_u64(type, \
 		{ EXTENT_MAP_LAST_BYTE, "LAST_BYTE" }, \
 		{ EXTENT_MAP_HOLE, "HOLE" }, \
 		{ EXTENT_MAP_INLINE, "INLINE" }, \
@@ -139,14 +160,18 @@ DEFINE_EVENT(btrfs__inode, btrfs_inode_evict,
 		{ EXTENT_FLAG_PINNED, "PINNED" }, \
 		{ EXTENT_FLAG_COMPRESSED, "COMPRESSED" }, \
 		{ EXTENT_FLAG_VACANCY, "VACANCY" }, \
-		{ EXTENT_FLAG_PREALLOC, "PREALLOC" })
+		{ EXTENT_FLAG_PREALLOC, "PREALLOC" }, \
+		{ EXTENT_FLAG_LOGGING, "LOGGING" }, \
+		{ EXTENT_FLAG_FILLING, "FILLING" })
 
-TRACE_EVENT(btrfs_get_extent,
+TRACE_EVENT_CONDITION(btrfs_get_extent,
 
 	TP_PROTO(struct btrfs_root *root, struct extent_map *map),
 
 	TP_ARGS(root, map),
 
+	TP_CONDITION(map),
+
 	TP_STRUCT__entry(
 		__field( u64, root_objectid )
 		__field( u64, start )
@@ -185,14 +210,19 @@ TRACE_EVENT(btrfs_get_extent,
 		  __entry->refs, __entry->compress_type)
 );
 
-#define show_ordered_flags(flags) \
-	__print_symbolic(flags, \
-		{ BTRFS_ORDERED_IO_DONE, "IO_DONE" }, \
-		{ BTRFS_ORDERED_COMPLETE, "COMPLETE" }, \
-		{ BTRFS_ORDERED_NOCOW, "NOCOW" }, \
-		{ BTRFS_ORDERED_COMPRESSED, "COMPRESSED" }, \
-		{ BTRFS_ORDERED_PREALLOC, "PREALLOC" }, \
-		{ BTRFS_ORDERED_DIRECT, "DIRECT" })
+#define show_ordered_flags(flags) \
+	__print_flags(flags, "|", \
+		{ (1 << BTRFS_ORDERED_IO_DONE), "IO_DONE" }, \
+		{ (1 << BTRFS_ORDERED_COMPLETE), "COMPLETE" }, \
+		{ (1 << BTRFS_ORDERED_NOCOW), "NOCOW" }, \
+		{ (1 << BTRFS_ORDERED_COMPRESSED), "COMPRESSED" }, \
+		{ (1 << BTRFS_ORDERED_PREALLOC), "PREALLOC" }, \
+		{ (1 << BTRFS_ORDERED_DIRECT), "DIRECT" }, \
+		{ (1 << BTRFS_ORDERED_IOERR), "IOERR" }, \
+		{ (1 << BTRFS_ORDERED_UPDATED_ISIZE), "UPDATED_ISIZE" }, \
+		{ (1 << BTRFS_ORDERED_LOGGED_CSUM), "LOGGED_CSUM" }, \
+		{ (1 << BTRFS_ORDERED_TRUNCATED), "TRUNCATED" })
+
 
 DECLARE_EVENT_CLASS(btrfs__ordered_extent,
 
@@ -284,7 +314,6 @@ DECLARE_EVENT_CLASS(btrfs__writepage,
 		__field( long, pages_skipped )
 		__field( loff_t, range_start )
 		__field( loff_t, range_end )
-		__field( char, nonblocking )
 		__field( char, for_kupdate )
 		__field( char, for_reclaim )
 		__field( char, range_cyclic )
@@ -299,7 +328,6 @@ DECLARE_EVENT_CLASS(btrfs__writepage,
 		__entry->pages_skipped = wbc->pages_skipped;
 		__entry->range_start = wbc->range_start;
 		__entry->range_end = wbc->range_end;
-		__entry->nonblocking = wbc->nonblocking;
 		__entry->for_kupdate = wbc->for_kupdate;
 		__entry->for_reclaim = wbc->for_reclaim;
 		__entry->range_cyclic = wbc->range_cyclic;
@@ -310,13 +338,13 @@ DECLARE_EVENT_CLASS(btrfs__writepage,
 
 	TP_printk("root = %llu(%s), ino = %lu, page_index = %lu, "
 		  "nr_to_write = %ld, pages_skipped = %ld, range_start = %llu, "
-		  "range_end = %llu, nonblocking = %d, for_kupdate = %d, "
+		  "range_end = %llu, for_kupdate = %d, "
 		  "for_reclaim = %d, range_cyclic = %d, writeback_index = %lu",
 		  show_root_type(__entry->root_objectid),
 		  (unsigned long)__entry->ino, __entry->index,
 		  __entry->nr_to_write, __entry->pages_skipped,
 		  __entry->range_start, __entry->range_end,
-		  __entry->nonblocking, __entry->for_kupdate,
+		  __entry->for_kupdate,
 		  __entry->for_reclaim, __entry->range_cyclic,
 		  (unsigned long)__entry->writeback_index)
 );
@@ -417,7 +445,7 @@ TRACE_EVENT(btrfs_sync_fs,
 		{ BTRFS_UPDATE_DELAYED_HEAD, "UPDATE_DELAYED_HEAD" })
 
-TRACE_EVENT(btrfs_delayed_tree_ref,
+DECLARE_EVENT_CLASS(btrfs_delayed_tree_ref,
 
 	TP_PROTO(struct btrfs_delayed_ref_node *ref,
 		 struct btrfs_delayed_tree_ref *full_ref,
@@ -433,6 +461,7 @@ TRACE_EVENT(btrfs_delayed_tree_ref,
 		__field( u64, ref_root )
 		__field( int, level )
 		__field( int, type )
+		__field( u64, seq )
 	),
 
 	TP_fast_assign(
@@ -443,20 +472,40 @@ TRACE_EVENT(btrfs_delayed_tree_ref,
 		__entry->ref_root = full_ref->root;
 		__entry->level = full_ref->level;
 		__entry->type = ref->type;
+		__entry->seq = ref->seq;
 	),
 
 	TP_printk("bytenr = %llu, num_bytes = %llu, action = %s, "
 		  "parent = %llu(%s), ref_root = %llu(%s), level = %d, "
-		  "type = %s",
+		  "type = %s, seq = %llu",
 		  (unsigned long long)__entry->bytenr,
 		  (unsigned long long)__entry->num_bytes,
 		  show_ref_action(__entry->action),
 		  show_root_type(__entry->parent),
 		  show_root_type(__entry->ref_root),
-		  __entry->level, show_ref_type(__entry->type))
+		  __entry->level, show_ref_type(__entry->type),
+		  (unsigned long long)__entry->seq)
+);
+
+DEFINE_EVENT(btrfs_delayed_tree_ref, add_delayed_tree_ref,
+
+	TP_PROTO(struct btrfs_delayed_ref_node *ref,
+		 struct btrfs_delayed_tree_ref *full_ref,
+		 int action),
+
+	TP_ARGS(ref, full_ref, action)
+);
+
+DEFINE_EVENT(btrfs_delayed_tree_ref, run_delayed_tree_ref,
+
+	TP_PROTO(struct btrfs_delayed_ref_node *ref,
+		 struct btrfs_delayed_tree_ref *full_ref,
+		 int action),
+
+	TP_ARGS(ref, full_ref, action)
 );
 
-TRACE_EVENT(btrfs_delayed_data_ref,
+DECLARE_EVENT_CLASS(btrfs_delayed_data_ref,
 
 	TP_PROTO(struct btrfs_delayed_ref_node *ref,
 		 struct btrfs_delayed_data_ref *full_ref,
@@ -473,6 +522,7 @@ TRACE_EVENT(btrfs_delayed_data_ref,
 		__field( u64, owner )
 		__field( u64, offset )
 		__field( int, type )
+		__field( u64, seq )
 	),
 
 	TP_fast_assign(
@@ -484,11 +534,12 @@ TRACE_EVENT(btrfs_delayed_data_ref,
 		__entry->owner = full_ref->objectid;
 		__entry->offset = full_ref->offset;
 		__entry->type = ref->type;
+		__entry->seq = ref->seq;
 	),
 
 	TP_printk("bytenr = %llu, num_bytes = %llu, action = %s, "
 		  "parent = %llu(%s), ref_root = %llu(%s), owner = %llu, "
-		  "offset = %llu, type = %s",
+		  "offset = %llu, type = %s, seq = %llu",
 		  (unsigned long long)__entry->bytenr,
 		  (unsigned long long)__entry->num_bytes,
 		  show_ref_action(__entry->action),
@@ -496,10 +547,29 @@ TRACE_EVENT(btrfs_delayed_data_ref,
 		  show_root_type(__entry->ref_root),
 		  (unsigned long long)__entry->owner,
 		  (unsigned long long)__entry->offset,
-		  show_ref_type(__entry->type))
+		  show_ref_type(__entry->type),
+		  (unsigned long long)__entry->seq)
+);
+
+DEFINE_EVENT(btrfs_delayed_data_ref, add_delayed_data_ref,
+
+	TP_PROTO(struct btrfs_delayed_ref_node *ref,
+		 struct btrfs_delayed_data_ref *full_ref,
+		 int action),
+
+	TP_ARGS(ref, full_ref, action)
 );
 
-TRACE_EVENT(btrfs_delayed_ref_head,
+DEFINE_EVENT(btrfs_delayed_data_ref, run_delayed_data_ref,
+
+	TP_PROTO(struct btrfs_delayed_ref_node *ref,
+		 struct btrfs_delayed_data_ref *full_ref,
+		 int action),
+
+	TP_ARGS(ref, full_ref, action)
+);
+
+DECLARE_EVENT_CLASS(btrfs_delayed_ref_head,
 
 	TP_PROTO(struct btrfs_delayed_ref_node *ref,
 		 struct btrfs_delayed_ref_head *head_ref,
@@ -528,6 +598,24 @@ TRACE_EVENT(btrfs_delayed_ref_head,
 		  __entry->is_data)
 );
 
+DEFINE_EVENT(btrfs_delayed_ref_head, add_delayed_ref_head,
+
+	TP_PROTO(struct btrfs_delayed_ref_node *ref,
+		 struct btrfs_delayed_ref_head *head_ref,
+		 int action),
+
+	TP_ARGS(ref, head_ref, action)
+);
+
+DEFINE_EVENT(btrfs_delayed_ref_head, run_delayed_ref_head,
+
+	TP_PROTO(struct btrfs_delayed_ref_node *ref,
+		 struct btrfs_delayed_ref_head *head_ref,
+		 int action),
+
+	TP_ARGS(ref, head_ref, action)
+);
+
 #define show_chunk_type(type) \
 	__print_flags(type, "|", \
 		{ BTRFS_BLOCK_GROUP_DATA, "DATA" }, \
@@ -536,7 +624,9 @@ TRACE_EVENT(btrfs_delayed_ref_head,
 		{ BTRFS_BLOCK_GROUP_RAID0, "RAID0" }, \
 		{ BTRFS_BLOCK_GROUP_RAID1, "RAID1" }, \
 		{ BTRFS_BLOCK_GROUP_DUP, "DUP" }, \
-		{ BTRFS_BLOCK_GROUP_RAID10, "RAID10"})
+		{ BTRFS_BLOCK_GROUP_RAID10, "RAID10"}, \
+		{ BTRFS_BLOCK_GROUP_RAID5, "RAID5" }, \
+		{ BTRFS_BLOCK_GROUP_RAID6, "RAID6" })
 
 DECLARE_EVENT_CLASS(btrfs__chunk,
 
@@ -623,6 +713,34 @@ TRACE_EVENT(btrfs_cow_block,
 		  __entry->cow_level)
 );
 
+TRACE_EVENT(btrfs_space_reservation,
+
+	TP_PROTO(struct btrfs_fs_info *fs_info, char *type, u64 val,
+		 u64 bytes, int reserve),
+
+	TP_ARGS(fs_info, type, val, bytes, reserve),
+
+	TP_STRUCT__entry(
+		__array( u8, fsid, BTRFS_UUID_SIZE )
+		__string( type, type )
+		__field( u64, val )
+		__field( u64, bytes )
+		__field( int, reserve )
+	),
+
+	TP_fast_assign(
+		memcpy(__entry->fsid, fs_info->fsid, BTRFS_UUID_SIZE);
+		__assign_str(type, type);
+		__entry->val = val;
+		__entry->bytes = bytes;
+		__entry->reserve = reserve;
+	),
+
+	TP_printk("%pU: %s: %Lu %s %Lu", __entry->fsid, __get_str(type),
+		  __entry->val, __entry->reserve ? "reserve" : "release",
+		  __entry->bytes)
+);
+
 DECLARE_EVENT_CLASS(btrfs__reserved_extent,
 
 	TP_PROTO(struct btrfs_root *root, u64 start, u64 len),
@@ -661,6 +779,346 @@ DEFINE_EVENT(btrfs__reserved_extent, btrfs_reserved_extent_free,
 	TP_ARGS(root, start, len)
 );
 
+TRACE_EVENT(find_free_extent,
+
+	TP_PROTO(struct btrfs_root *root, u64 num_bytes, u64 empty_size,
+		 u64 data),
+
+	TP_ARGS(root, num_bytes, empty_size, data),
+
+	TP_STRUCT__entry(
+		__field( u64, root_objectid )
+		__field( u64, num_bytes )
+		__field( u64, empty_size )
+		__field( u64, data )
+	),
+
+	TP_fast_assign(
+		__entry->root_objectid = root->root_key.objectid;
+		__entry->num_bytes = num_bytes;
+		__entry->empty_size = empty_size;
+		__entry->data = data;
+	),
+
+	TP_printk("root = %Lu(%s), len = %Lu, empty_size = %Lu, "
+		  "flags = %Lu(%s)", show_root_type(__entry->root_objectid),
+		  __entry->num_bytes, __entry->empty_size, __entry->data,
+		  __print_flags((unsigned long)__entry->data, "|",
+				 BTRFS_GROUP_FLAGS))
+);
+
+DECLARE_EVENT_CLASS(btrfs__reserve_extent,
+
+	TP_PROTO(struct btrfs_root *root,
+		 struct btrfs_block_group_cache *block_group, u64 start,
+		 u64 len),
+
+	TP_ARGS(root, block_group, start, len),
+
+	TP_STRUCT__entry(
+		__field( u64, root_objectid )
+		__field( u64, bg_objectid )
+		__field( u64, flags )
+		__field( u64, start )
+		__field( u64, len )
+	),
+
+	TP_fast_assign(
+		__entry->root_objectid = root->root_key.objectid;
+		__entry->bg_objectid = block_group->key.objectid;
+		__entry->flags = block_group->flags;
+		__entry->start = start;
+		__entry->len = len;
+	),
+
+	TP_printk("root = %Lu(%s), block_group = %Lu, flags = %Lu(%s), "
+		  "start = %Lu, len = %Lu",
+		  show_root_type(__entry->root_objectid), __entry->bg_objectid,
+		  __entry->flags, __print_flags((unsigned long)__entry->flags,
+						"|", BTRFS_GROUP_FLAGS),
+		  __entry->start, __entry->len)
+);
+
+DEFINE_EVENT(btrfs__reserve_extent, btrfs_reserve_extent,
+
+	TP_PROTO(struct btrfs_root *root,
+		 struct btrfs_block_group_cache *block_group, u64 start,
+		 u64 len),
+
+	TP_ARGS(root, block_group, start, len)
+);
+
+DEFINE_EVENT(btrfs__reserve_extent, btrfs_reserve_extent_cluster,
+
+	TP_PROTO(struct btrfs_root *root,
+		 struct btrfs_block_group_cache *block_group, u64 start,
+		 u64 len),
+
+	TP_ARGS(root, block_group, start, len)
+);
+
+TRACE_EVENT(btrfs_find_cluster,
+
+	TP_PROTO(struct btrfs_block_group_cache *block_group, u64 start,
+		 u64 bytes, u64 empty_size, u64 min_bytes),
+
+	TP_ARGS(block_group, start, bytes, empty_size, min_bytes),
+
+	TP_STRUCT__entry(
+		__field( u64, bg_objectid )
+		__field( u64, flags )
+		__field( u64, start )
+		__field( u64, bytes )
+		__field( u64, empty_size )
+		__field( u64, min_bytes )
+	),
+
+	TP_fast_assign(
+		__entry->bg_objectid = block_group->key.objectid;
+		__entry->flags = block_group->flags;
+		__entry->start = start;
+		__entry->bytes = bytes;
+		__entry->empty_size = empty_size;
+		__entry->min_bytes = min_bytes;
+	),
+
+	TP_printk("block_group = %Lu, flags = %Lu(%s), start = %Lu, len = %Lu,"
+		  " empty_size = %Lu, min_bytes = %Lu", __entry->bg_objectid,
+		  __entry->flags,
+		  __print_flags((unsigned long)__entry->flags, "|",
+				BTRFS_GROUP_FLAGS), __entry->start,
+		  __entry->bytes, __entry->empty_size, __entry->min_bytes)
+);
+
+TRACE_EVENT(btrfs_failed_cluster_setup,
+
+	TP_PROTO(struct btrfs_block_group_cache *block_group),
+
+	TP_ARGS(block_group),
+
+	TP_STRUCT__entry(
+		__field( u64, bg_objectid )
+	),
+
+	TP_fast_assign(
+		__entry->bg_objectid = block_group->key.objectid;
+	),
+
+	TP_printk("block_group = %Lu", __entry->bg_objectid)
+);
+
+TRACE_EVENT(btrfs_setup_cluster,
+
+	TP_PROTO(struct btrfs_block_group_cache *block_group,
+		 struct btrfs_free_cluster *cluster, u64 size, int bitmap),
+
+	TP_ARGS(block_group, cluster, size, bitmap),
+
+	TP_STRUCT__entry(
+		__field( u64, bg_objectid )
+		__field( u64, flags )
+		__field( u64, start )
+		__field( u64, max_size )
+		__field( u64, size )
+		__field( int, bitmap )
+	),
+
+	TP_fast_assign(
+		__entry->bg_objectid = block_group->key.objectid;
+		__entry->flags = block_group->flags;
+		__entry->start = cluster->window_start;
+		__entry->max_size = cluster->max_size;
+		__entry->size = size;
+		__entry->bitmap = bitmap;
+	),
+
+	TP_printk("block_group = %Lu, flags = %Lu(%s), window_start = %Lu, "
+		  "size = %Lu, max_size = %Lu, bitmap = %d",
+		  __entry->bg_objectid,
+		  __entry->flags,
+		  __print_flags((unsigned long)__entry->flags, "|",
+				BTRFS_GROUP_FLAGS), __entry->start,
+		  __entry->size, __entry->max_size, __entry->bitmap)
+);
+
+struct extent_state;
+TRACE_EVENT(alloc_extent_state,
+
+	TP_PROTO(struct extent_state *state, gfp_t mask, unsigned long IP),
+
+	TP_ARGS(state, mask, IP),
+
+	TP_STRUCT__entry(
+		__field(struct extent_state *, state)
+		__field(gfp_t, mask)
+		__field(unsigned long, ip)
+	),
+
+	TP_fast_assign(
+		__entry->state = state,
+		__entry->mask = mask,
+		__entry->ip = IP
+	),
+
+	TP_printk("state=%p; mask = %s; caller = %pF", __entry->state,
+		  show_gfp_flags(__entry->mask), (void *)__entry->ip)
+);
+
+TRACE_EVENT(free_extent_state,
+
+	TP_PROTO(struct extent_state *state, unsigned long IP),
+
+	TP_ARGS(state, IP),
+
+	TP_STRUCT__entry(
+		__field(struct extent_state *, state)
+		__field(unsigned long, ip)
+	),
+
+	TP_fast_assign(
+		__entry->state = state,
+		__entry->ip = IP
+	),
+
+	TP_printk(" state=%p; caller = %pF", __entry->state,
+		  (void *)__entry->ip)
+);
+
+DECLARE_EVENT_CLASS(btrfs__work,
+
+	TP_PROTO(struct btrfs_work *work),
+
+	TP_ARGS(work),
+
+	TP_STRUCT__entry(
+		__field( void *, work )
+		__field( void *, wq )
+		__field( void *, func )
+		__field( void *, ordered_func )
+		__field( void *, ordered_free )
+	),
+
+	TP_fast_assign(
+		__entry->work = work;
+		__entry->wq = work->wq;
+		__entry->func = work->func;
+		__entry->ordered_func = work->ordered_func;
+		__entry->ordered_free = work->ordered_free;
+	),
+
+	TP_printk("work=%p, wq=%p, func=%p, ordered_func=%p, ordered_free=%p",
+		  __entry->work, __entry->wq, __entry->func,
+		  __entry->ordered_func, __entry->ordered_free)
+);
+
+/* For situiations that the work is freed */
+DECLARE_EVENT_CLASS(btrfs__work__done,
+
+	TP_PROTO(struct btrfs_work *work),
+
+	TP_ARGS(work),
+
+	TP_STRUCT__entry(
+		__field( void *, work )
+	),
+
+	TP_fast_assign(
+		__entry->work = work;
+	),
+
+	TP_printk("work->%p", __entry->work)
+);
+
+DEFINE_EVENT(btrfs__work, btrfs_work_queued,
+
+	TP_PROTO(struct btrfs_work *work),
+
+	TP_ARGS(work)
+);
+
+DEFINE_EVENT(btrfs__work, btrfs_work_sched,
+
+	TP_PROTO(struct btrfs_work *work),
+
+	TP_ARGS(work)
+);
+
+DEFINE_EVENT(btrfs__work, btrfs_normal_work_done,
+
+	TP_PROTO(struct btrfs_work *work),
+
+	TP_ARGS(work)
+);
+
+DEFINE_EVENT(btrfs__work__done, btrfs_all_work_done,
+
+	TP_PROTO(struct btrfs_work *work),
+
+	TP_ARGS(work)
+);
+
+DEFINE_EVENT(btrfs__work, btrfs_ordered_sched,
+
+	TP_PROTO(struct btrfs_work *work),
+
+	TP_ARGS(work)
+);
+
+DECLARE_EVENT_CLASS(btrfs__workqueue,
+
+	TP_PROTO(struct __btrfs_workqueue *wq, const char *name, int high),
+
+	TP_ARGS(wq, name, high),
+
+	TP_STRUCT__entry(
+		__field( void *, wq )
+		__string( name, name )
+		__field( int , high )
+	),
+
+	TP_fast_assign(
+		__entry->wq = wq;
+		__assign_str(name, name);
+		__entry->high = high;
+	),
+
+	TP_printk("name=%s%s, wq=%p", __get_str(name),
+		  __print_flags(__entry->high, "",
+				{(WQ_HIGHPRI), "-high"}),
+		  __entry->wq)
+);
+
+DEFINE_EVENT(btrfs__workqueue, btrfs_workqueue_alloc,
+
+	TP_PROTO(struct __btrfs_workqueue *wq, const char *name, int high),
+
+	TP_ARGS(wq, name, high)
+);
+
+DECLARE_EVENT_CLASS(btrfs__workqueue_done,
+
+	TP_PROTO(struct __btrfs_workqueue *wq),
+
+	TP_ARGS(wq),
+
+	TP_STRUCT__entry(
+		__field( void *, wq )
+	),
+
+	TP_fast_assign(
+		__entry->wq = wq;
+	),
+
+	TP_printk("wq=%p", __entry->wq)
+);
+
+DEFINE_EVENT(btrfs__workqueue_done, btrfs_workqueue_destroy,
+
+	TP_PROTO(struct __btrfs_workqueue *wq),
+
+	TP_ARGS(wq)
+);
+
 #endif /* _TRACE_BTRFS_H */
 
 /* This part must be outside protection */
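Each DEFINE_EVENT() above expands to a trace_<name>() inline that btrfs code calls at the matching point, and once the kernel is built with these tracepoints the events show up under /sys/kernel/debug/tracing/events/btrfs/. As a rough illustration only (not code from this commit; queue_btrfs_work() is a hypothetical caller), firing one of the new workqueue events would look like this:

/*
 * Illustrative sketch, not part of this diff. queue_btrfs_work() is a
 * made-up helper; the tracepoint call matches the class prototype
 * TP_PROTO(struct btrfs_work *work) declared for btrfs__work above.
 */
#include <trace/events/btrfs.h>

static void queue_btrfs_work(struct __btrfs_workqueue *wq,
			     struct btrfs_work *work)
{
	/* records the work, wq, func, ordered_func and ordered_free pointers */
	trace_btrfs_work_queued(work);

	/* ... hand the work off to the underlying kernel workqueue ... */
}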
