Diffstat (limited to 'include/trace/events/bcache.h')
-rw-r--r--   include/trace/events/bcache.h   86
1 files changed, 52 insertions, 34 deletions
diff --git a/include/trace/events/bcache.h b/include/trace/events/bcache.h
index e2b9576d00e..c9c3c044b32 100644
--- a/include/trace/events/bcache.h
+++ b/include/trace/events/bcache.h
@@ -24,10 +24,10 @@ DECLARE_EVENT_CLASS(bcache_request,
 		__entry->dev		= bio->bi_bdev->bd_dev;
 		__entry->orig_major	= d->disk->major;
 		__entry->orig_minor	= d->disk->first_minor;
-		__entry->sector		= bio->bi_sector;
-		__entry->orig_sector	= bio->bi_sector - 16;
-		__entry->nr_sector	= bio->bi_size >> 9;
-		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
+		__entry->sector		= bio->bi_iter.bi_sector;
+		__entry->orig_sector	= bio->bi_iter.bi_sector - 16;
+		__entry->nr_sector	= bio->bi_iter.bi_size >> 9;
+		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
 	),
 
 	TP_printk("%d,%d %s %llu + %u (from %d,%d @ %llu)",
@@ -99,9 +99,9 @@ DECLARE_EVENT_CLASS(bcache_bio,
 
 	TP_fast_assign(
 		__entry->dev		= bio->bi_bdev->bd_dev;
-		__entry->sector		= bio->bi_sector;
-		__entry->nr_sector	= bio->bi_size >> 9;
-		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
+		__entry->sector		= bio->bi_iter.bi_sector;
+		__entry->nr_sector	= bio->bi_iter.bi_size >> 9;
+		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
 	),
 
 	TP_printk("%d,%d %s %llu + %u",
@@ -134,9 +134,9 @@ TRACE_EVENT(bcache_read,
 
 	TP_fast_assign(
 		__entry->dev		= bio->bi_bdev->bd_dev;
-		__entry->sector		= bio->bi_sector;
-		__entry->nr_sector	= bio->bi_size >> 9;
-		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
+		__entry->sector		= bio->bi_iter.bi_sector;
+		__entry->nr_sector	= bio->bi_iter.bi_size >> 9;
+		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
 		__entry->cache_hit = hit;
 		__entry->bypass = bypass;
 	),
@@ -162,9 +162,9 @@ TRACE_EVENT(bcache_write,
 
 	TP_fast_assign(
 		__entry->dev		= bio->bi_bdev->bd_dev;
-		__entry->sector		= bio->bi_sector;
-		__entry->nr_sector	= bio->bi_size >> 9;
-		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
+		__entry->sector		= bio->bi_iter.bi_sector;
+		__entry->nr_sector	= bio->bi_iter.bi_size >> 9;
+		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
 		__entry->writeback = writeback;
 		__entry->bypass = bypass;
 	),
@@ -247,7 +247,7 @@ TRACE_EVENT(bcache_btree_write,
 	TP_fast_assign(
 		__entry->bucket	= PTR_BUCKET_NR(b->c, &b->key, 0);
 		__entry->block	= b->written;
-		__entry->keys	= b->sets[b->nsets].data->keys;
+		__entry->keys	= b->keys.set[b->keys.nsets].data->keys;
 	),
 
 	TP_printk("bucket %zu", __entry->bucket)
@@ -399,48 +399,66 @@ TRACE_EVENT(bcache_keyscan,
 
 /* Allocator */
 
-TRACE_EVENT(bcache_alloc_invalidate,
-	TP_PROTO(struct cache *ca),
-	TP_ARGS(ca),
+TRACE_EVENT(bcache_invalidate,
+	TP_PROTO(struct cache *ca, size_t bucket),
+	TP_ARGS(ca, bucket),
 
 	TP_STRUCT__entry(
-		__field(unsigned,	free		)
-		__field(unsigned,	free_inc	)
-		__field(unsigned,	free_inc_size	)
-		__field(unsigned,	unused		)
+		__field(unsigned,	sectors		)
+		__field(dev_t,		dev		)
+		__field(__u64,		offset		)
 	),
 
 	TP_fast_assign(
-		__entry->free		= fifo_used(&ca->free);
-		__entry->free_inc	= fifo_used(&ca->free_inc);
-		__entry->free_inc_size	= ca->free_inc.size;
-		__entry->unused		= fifo_used(&ca->unused);
+		__entry->dev		= ca->bdev->bd_dev;
+		__entry->offset		= bucket << ca->set->bucket_bits;
+		__entry->sectors	= GC_SECTORS_USED(&ca->buckets[bucket]);
 	),
 
-	TP_printk("free %u free_inc %u/%u unused %u", __entry->free,
-		  __entry->free_inc, __entry->free_inc_size, __entry->unused)
+	TP_printk("invalidated %u sectors at %d,%d sector=%llu",
+		  __entry->sectors, MAJOR(__entry->dev),
+		  MINOR(__entry->dev), __entry->offset)
+);
+
+TRACE_EVENT(bcache_alloc,
+	TP_PROTO(struct cache *ca, size_t bucket),
+	TP_ARGS(ca, bucket),
+
+	TP_STRUCT__entry(
+		__field(dev_t,		dev		)
+		__field(__u64,		offset		)
+	),
+
+	TP_fast_assign(
+		__entry->dev		= ca->bdev->bd_dev;
+		__entry->offset		= bucket << ca->set->bucket_bits;
+	),
+
+	TP_printk("allocated %d,%d sector=%llu", MAJOR(__entry->dev),
+		  MINOR(__entry->dev), __entry->offset)
 );
 
 TRACE_EVENT(bcache_alloc_fail,
-	TP_PROTO(struct cache *ca),
-	TP_ARGS(ca),
+	TP_PROTO(struct cache *ca, unsigned reserve),
+	TP_ARGS(ca, reserve),
 
 	TP_STRUCT__entry(
+		__field(dev_t,		dev		)
 		__field(unsigned,	free		)
 		__field(unsigned,	free_inc	)
-		__field(unsigned,	unused		)
 		__field(unsigned,	blocked		)
 	),
 
 	TP_fast_assign(
-		__entry->free		= fifo_used(&ca->free);
+		__entry->dev		= ca->bdev->bd_dev;
+		__entry->free		= fifo_used(&ca->free[reserve]);
 		__entry->free_inc	= fifo_used(&ca->free_inc);
-		__entry->unused		= fifo_used(&ca->unused);
 		__entry->blocked	= atomic_read(&ca->set->prio_blocked);
 	),
 
-	TP_printk("free %u free_inc %u unused %u blocked %u", __entry->free,
-		  __entry->free_inc, __entry->unused, __entry->blocked)
+	TP_printk("alloc fail %d,%d free %u free_inc %u blocked %u",
+		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->free,
+		  __entry->free_inc, __entry->blocked)
 );
 
 /* Background writeback */
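For reference, each TRACE_EVENT(name, TP_PROTO(...), ...) above generates a trace_name() function with that prototype, so the renamed bcache_invalidate, the new bcache_alloc, and the updated bcache_alloc_fail events would be emitted roughly as sketched below. This is an illustrative sketch only, not the actual drivers/md/bcache allocator code: the call-site shapes (a bucket index derived from the ca->buckets array, a reserve-indexed free fifo) are assumptions; only the trace_*() signatures follow from the TP_PROTO() lines in the diff.

/*
 * Illustrative sketch of call sites for the tracepoints declared above.
 * Only the trace_*() signatures are taken from the TP_PROTO() definitions;
 * everything else is assumed for the example.
 */
#include <trace/events/bcache.h>

static void sketch_invalidate_bucket(struct cache *ca, struct bucket *b)
{
	size_t bucket = b - ca->buckets;	/* assumed bucket-index derivation */

	trace_bcache_invalidate(ca, bucket);	/* replaces trace_bcache_alloc_invalidate(ca) */
	/* ... mark the bucket's cached data invalid ... */
}

static long sketch_bucket_alloc(struct cache *ca, unsigned reserve)
{
	long r = -1;

	/* ... try to pop a bucket index off the (assumed) ca->free[reserve] fifo into r ... */

	if (r >= 0)
		trace_bcache_alloc(ca, r);		/* success: event logs dev and sector offset */
	else
		trace_bcache_alloc_fail(ca, reserve);	/* failure: event now records which reserve ran dry */

	return r;
}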
