Diffstat (limited to 'kernel/trace/blktrace.c')
-rw-r--r--  kernel/trace/blktrace.c | 74 ++++++++++++++++++++++++------------
1 file changed, 49 insertions(+), 25 deletions(-)
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index b8b8560bfb9..c1bd4ada2a0 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -26,6 +26,7 @@
 #include <linux/export.h>
 #include <linux/time.h>
 #include <linux/uaccess.h>
+#include <linux/list.h>
 
 #include <trace/events/block.h>
 
@@ -38,6 +39,9 @@ static unsigned int blktrace_seq __read_mostly = 1;
 static struct trace_array *blk_tr;
 static bool blk_tracer_enabled __read_mostly;
 
+static LIST_HEAD(running_trace_list);
+static __cacheline_aligned_in_smp DEFINE_SPINLOCK(running_trace_lock);
+
 /* Select an alternative, minimalistic output than the original one */
 #define TRACE_BLK_OPT_CLASSIC	0x1
 
@@ -107,10 +111,18 @@ record_it:
  * Send out a notify for this process, if we haven't done so since a trace
  * started
  */
-static void trace_note_tsk(struct blk_trace *bt, struct task_struct *tsk)
+static void trace_note_tsk(struct task_struct *tsk)
 {
+	unsigned long flags;
+	struct blk_trace *bt;
+
 	tsk->btrace_seq = blktrace_seq;
-	trace_note(bt, tsk->pid, BLK_TN_PROCESS, tsk->comm, sizeof(tsk->comm));
+	spin_lock_irqsave(&running_trace_lock, flags);
+	list_for_each_entry(bt, &running_trace_list, running_list) {
+		trace_note(bt, tsk->pid, BLK_TN_PROCESS, tsk->comm,
+			   sizeof(tsk->comm));
+	}
+	spin_unlock_irqrestore(&running_trace_lock, flags);
 }
 
 static void trace_note_time(struct blk_trace *bt)
@@ -229,16 +241,15 @@ static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
 		goto record_it;
 	}
 
+	if (unlikely(tsk->btrace_seq != blktrace_seq))
+		trace_note_tsk(tsk);
+
 	/*
 	 * A word about the locking here - we disable interrupts to reserve
 	 * some space in the relay per-cpu buffer, to prevent an irq
 	 * from coming in and stepping on our toes.
 	 */
 	local_irq_save(flags);
-
-	if (unlikely(tsk->btrace_seq != blktrace_seq))
-		trace_note_tsk(bt, tsk);
-
 	t = relay_reserve(bt->rchan, sizeof(*t) + pdu_len);
 	if (t) {
 		sequence = per_cpu_ptr(bt->sequence, cpu);
@@ -477,6 +488,7 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
 	bt->dir = dir;
 	bt->dev = dev;
 	atomic_set(&bt->dropped, 0);
+	INIT_LIST_HEAD(&bt->running_list);
 
 	ret = -EIO;
 	bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
@@ -567,13 +579,12 @@ static int compat_blk_trace_setup(struct request_queue *q, char *name,
 		.end_lba = cbuts.end_lba,
 		.pid = cbuts.pid,
 	};
-	memcpy(&buts.name, &cbuts.name, 32);
 
 	ret = do_blk_trace_setup(q, name, dev, bdev, &buts);
 	if (ret)
 		return ret;
 
-	if (copy_to_user(arg, &buts.name, 32)) {
+	if (copy_to_user(arg, &buts.name, ARRAY_SIZE(buts.name))) {
 		blk_trace_remove(q);
 		return -EFAULT;
 	}
@@ -601,6 +612,9 @@ int blk_trace_startstop(struct request_queue *q, int start)
 			blktrace_seq++;
 			smp_mb();
 			bt->trace_state = Blktrace_running;
+			spin_lock_irq(&running_trace_lock);
+			list_add(&bt->running_list, &running_trace_list);
+			spin_unlock_irq(&running_trace_lock);
 
 			trace_note_time(bt);
 			ret = 0;
@@ -608,6 +622,9 @@ int blk_trace_startstop(struct request_queue *q, int start)
 	} else {
 		if (bt->trace_state == Blktrace_running) {
 			bt->trace_state = Blktrace_stopped;
+			spin_lock_irq(&running_trace_lock);
+			list_del_init(&bt->running_list);
+			spin_unlock_irq(&running_trace_lock);
 			relay_flush(bt->rchan);
 			ret = 0;
 		}
@@ -685,6 +702,7 @@ void blk_trace_shutdown(struct request_queue *q)
  * blk_add_trace_rq - Add a trace for a request oriented action
  * @q:		queue the io is for
  * @rq:		the source request
+ * @nr_bytes:	number of completed bytes
  * @what:	the action
  *
  * Description:
@@ -692,7 +710,7 @@ void blk_trace_shutdown(struct request_queue *q)
  *
 **/
 static void blk_add_trace_rq(struct request_queue *q, struct request *rq,
-			     u32 what)
+			     unsigned int nr_bytes, u32 what)
 {
 	struct blk_trace *bt = q->blk_trace;
 
@@ -701,11 +719,11 @@ static void blk_add_trace_rq(struct request_queue *q, struct request *rq,
 	if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
 		what |= BLK_TC_ACT(BLK_TC_PC);
-		__blk_add_trace(bt, 0, blk_rq_bytes(rq), rq->cmd_flags,
+		__blk_add_trace(bt, 0, nr_bytes, rq->cmd_flags,
 				what, rq->errors, rq->cmd_len, rq->cmd);
 	} else  {
 		what |= BLK_TC_ACT(BLK_TC_FS);
-		__blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq),
+		__blk_add_trace(bt, blk_rq_pos(rq), nr_bytes,
 				rq->cmd_flags, what, rq->errors, 0, NULL);
 	}
 }
 
@@ -713,33 +731,34 @@ static void blk_add_trace_rq(struct request_queue *q, struct request *rq,
 static void blk_add_trace_rq_abort(void *ignore,
 				   struct request_queue *q, struct request *rq)
 {
-	blk_add_trace_rq(q, rq, BLK_TA_ABORT);
+	blk_add_trace_rq(q, rq, blk_rq_bytes(rq), BLK_TA_ABORT);
 }
 
 static void blk_add_trace_rq_insert(void *ignore,
 				    struct request_queue *q, struct request *rq)
 {
-	blk_add_trace_rq(q, rq, BLK_TA_INSERT);
+	blk_add_trace_rq(q, rq, blk_rq_bytes(rq), BLK_TA_INSERT);
 }
 
 static void blk_add_trace_rq_issue(void *ignore,
 				   struct request_queue *q, struct request *rq)
 {
-	blk_add_trace_rq(q, rq, BLK_TA_ISSUE);
+	blk_add_trace_rq(q, rq, blk_rq_bytes(rq), BLK_TA_ISSUE);
 }
 
 static void blk_add_trace_rq_requeue(void *ignore,
 				     struct request_queue *q,
 				     struct request *rq)
 {
-	blk_add_trace_rq(q, rq, BLK_TA_REQUEUE);
+	blk_add_trace_rq(q, rq, blk_rq_bytes(rq), BLK_TA_REQUEUE);
 }
 
 static void blk_add_trace_rq_complete(void *ignore,
 				      struct request_queue *q,
-				      struct request *rq)
+				      struct request *rq,
+				      unsigned int nr_bytes)
 {
-	blk_add_trace_rq(q, rq, BLK_TA_COMPLETE);
+	blk_add_trace_rq(q, rq, nr_bytes, BLK_TA_COMPLETE);
 }
 
 /**
@@ -764,8 +783,8 @@ static void blk_add_trace_bio(struct request_queue *q, struct bio *bio,
 	if (!error && !bio_flagged(bio, BIO_UPTODATE))
 		error = EIO;
 
-	__blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw, what,
-			error, 0, NULL);
+	__blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size,
+			bio->bi_rw, what, error, 0, NULL);
 }
 
 static void blk_add_trace_bio_bounce(void *ignore,
@@ -868,8 +887,9 @@ static void blk_add_trace_split(void *ignore,
 	if (bt) {
 		__be64 rpdu = cpu_to_be64(pdu);
 
-		__blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw,
-				BLK_TA_SPLIT, !bio_flagged(bio, BIO_UPTODATE),
+		__blk_add_trace(bt, bio->bi_iter.bi_sector,
+				bio->bi_iter.bi_size, bio->bi_rw, BLK_TA_SPLIT,
+				!bio_flagged(bio, BIO_UPTODATE),
 				sizeof(rpdu), &rpdu);
 	}
 }
@@ -901,9 +921,9 @@ static void blk_add_trace_bio_remap(void *ignore,
 	r.device_to   = cpu_to_be32(bio->bi_bdev->bd_dev);
 	r.sector_from = cpu_to_be64(from);
 
-	__blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw,
-			BLK_TA_REMAP, !bio_flagged(bio, BIO_UPTODATE),
-			sizeof(r), &r);
+	__blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size,
+			bio->bi_rw, BLK_TA_REMAP,
+			!bio_flagged(bio, BIO_UPTODATE), sizeof(r), &r);
 }
 
 /**
@@ -1409,7 +1429,8 @@ static enum print_line_t blk_tracer_print_line(struct trace_iterator *iter)
 	return print_one_line(iter, true);
 }
 
-static int blk_tracer_set_flag(u32 old_flags, u32 bit, int set)
+static int
+blk_tracer_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
 {
 	/* don't output context-info for blk_classic output */
 	if (bit == TRACE_BLK_OPT_CLASSIC) {
@@ -1472,6 +1493,9 @@ static int blk_trace_remove_queue(struct request_queue *q)
 	if (atomic_dec_and_test(&blk_probes_ref))
 		blk_unregister_tracepoints();
 
+	spin_lock_irq(&running_trace_lock);
+	list_del(&bt->running_list);
+	spin_unlock_irq(&running_trace_lock);
 	blk_trace_free(bt);
 	return 0;
 }
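
For readers skimming the diff: its functional heart is trace_note_tsk(), which no longer notes the process into a single blk_trace but walks the new global running_trace_list under running_trace_lock, so one BLK_TN_PROCESS note reaches every trace that is currently running. Below is a minimal userspace C sketch of that fan-out pattern. The names are hypothetical, a pthread mutex stands in for the kernel spinlock, and a hand-rolled doubly linked list stands in for <linux/list.h>; it illustrates the pattern only and is not kernel code.

/* Userspace sketch of the running-trace-list pattern above: every
 * active trace sits on one global list, and a "process note" is fanned
 * out to all of them under a single lock. All names are hypothetical. */
#include <pthread.h>
#include <stdio.h>

struct trace {
	const char *dev_name;       /* which device this trace follows */
	struct trace *prev, *next;  /* linkage on the running list */
};

static struct trace *running_head;  /* all currently running traces */
static pthread_mutex_t running_lock = PTHREAD_MUTEX_INITIALIZER;

static void trace_start(struct trace *t)
{
	/* analogue of list_add() under spin_lock_irq() on start */
	pthread_mutex_lock(&running_lock);
	t->next = running_head;
	t->prev = NULL;
	if (running_head)
		running_head->prev = t;
	running_head = t;
	pthread_mutex_unlock(&running_lock);
}

static void trace_stop(struct trace *t)
{
	/* analogue of list_del_init() under spin_lock_irq() on stop */
	pthread_mutex_lock(&running_lock);
	if (t->prev)
		t->prev->next = t->next;
	else
		running_head = t->next;
	if (t->next)
		t->next->prev = t->prev;
	pthread_mutex_unlock(&running_lock);
}

/* Analogue of trace_note_tsk(): emit one process note per running
 * trace, so every trace buffer can resolve this pid later. */
static void note_process(int pid, const char *comm)
{
	struct trace *t;

	pthread_mutex_lock(&running_lock);
	for (t = running_head; t; t = t->next)
		printf("trace %s: note pid=%d comm=%s\n",
		       t->dev_name, pid, comm);
	pthread_mutex_unlock(&running_lock);
}

int main(void)
{
	struct trace a = { .dev_name = "sda" };
	struct trace b = { .dev_name = "sdb" };

	trace_start(&a);
	trace_start(&b);
	note_process(1234, "fio");	/* reaches both traces */
	trace_stop(&a);
	note_process(1234, "fio");	/* now only sdb */
	trace_stop(&b);
	return 0;
}

In the patch itself these roles are played by spin_lock_irq()/spin_lock_irqsave() and list_add()/list_del_init(); using list_del_init() on stop leaves the entry pointing at itself, which keeps the later unconditional list_del() in blk_trace_remove_queue() safe.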
