block: simplify and extend the block_bio_merge tracepoint class

The block_bio_merge tracepoint class can be reused for most bio-based
tracepoints.  For that it just needs to lose the superfluous q and rq
parameters.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Damien Le Moal <damien.lemoal@wdc.com>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Reviewed-by: Chaitanya Kulkarni <chaitanya.kulkarni@wdc.com>
Acked-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
commit e8a676d61c (parent b81b8f40c5)
Christoph Hellwig, 2020-12-03 17:21:36 +01:00, committed by Jens Axboe
6 files changed, 48 insertions(+), 161 deletions(-)
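The payoff is that a new bio-based tracepoint is now a three-line DEFINE_EVENT against the shared class. A minimal sketch (block_bio_foo is a hypothetical event name, not one added by this patch):

    /* Hypothetical event reusing the simplified class: no queue or
     * request plumbing is needed, since everything the class records
     * is taken from the bio itself. */
    DEFINE_EVENT(block_bio, block_bio_foo,
            TP_PROTO(struct bio *bio),
            TP_ARGS(bio)
    );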

diff --git a/block/blk-core.c b/block/blk-core.c

@@ -907,7 +907,7 @@ static noinline_for_stack bool submit_bio_checks(struct bio *bio)
 	blkcg_bio_issue_init(bio);
 
 	if (!bio_flagged(bio, BIO_TRACE_COMPLETION)) {
-		trace_block_bio_queue(q, bio);
+		trace_block_bio_queue(bio);
 		/* Now that enqueuing has been traced, we need to trace
 		 * completion as well.
 		 */

diff --git a/block/blk-merge.c b/block/blk-merge.c

@@ -922,7 +922,7 @@ static enum bio_merge_status bio_attempt_back_merge(struct request *req,
 	if (!ll_back_merge_fn(req, bio, nr_segs))
 		return BIO_MERGE_FAILED;
 
-	trace_block_bio_backmerge(req->q, req, bio);
+	trace_block_bio_backmerge(bio);
 	rq_qos_merge(req->q, req, bio);
 
 	if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
@@ -946,7 +946,7 @@ static enum bio_merge_status bio_attempt_front_merge(struct request *req,
 	if (!ll_front_merge_fn(req, bio, nr_segs))
 		return BIO_MERGE_FAILED;
 
-	trace_block_bio_frontmerge(req->q, req, bio);
+	trace_block_bio_frontmerge(bio);
 	rq_qos_merge(req->q, req, bio);
 
 	if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)

diff --git a/block/blk-mq.c b/block/blk-mq.c

@@ -2184,7 +2184,7 @@ blk_qc_t blk_mq_submit_bio(struct bio *bio)
 		goto queue_exit;
 	}
 
-	trace_block_getrq(q, bio, bio->bi_opf);
+	trace_block_getrq(bio);
 
 	rq_qos_track(q, rq, bio);
 

diff --git a/block/bounce.c b/block/bounce.c

@@ -340,7 +340,7 @@ static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig,
 		}
 	}
 
-	trace_block_bio_bounce(q, *bio_orig);
+	trace_block_bio_bounce(*bio_orig);
 
 	bio->bi_flags |= (1 << BIO_BOUNCED);
 

diff --git a/include/trace/events/block.h b/include/trace/events/block.h

@@ -226,45 +226,6 @@ DEFINE_EVENT(block_rq, block_rq_merge,
 	TP_ARGS(q, rq)
 );
 
-/**
- * block_bio_bounce - used bounce buffer when processing block operation
- * @q: queue holding the block operation
- * @bio: block operation
- *
- * A bounce buffer was used to handle the block operation @bio in @q.
- * This occurs when hardware limitations prevent a direct transfer of
- * data between the @bio data memory area and the IO device. Use of a
- * bounce buffer requires extra copying of data and decreases
- * performance.
- */
-TRACE_EVENT(block_bio_bounce,
-
-	TP_PROTO(struct request_queue *q, struct bio *bio),
-
-	TP_ARGS(q, bio),
-
-	TP_STRUCT__entry(
-		__field( dev_t, dev )
-		__field( sector_t, sector )
-		__field( unsigned int, nr_sector )
-		__array( char, rwbs, RWBS_LEN )
-		__array( char, comm, TASK_COMM_LEN )
-	),
-
-	TP_fast_assign(
-		__entry->dev = bio_dev(bio);
-		__entry->sector = bio->bi_iter.bi_sector;
-		__entry->nr_sector = bio_sectors(bio);
-		blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
-		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
-	),
-
-	TP_printk("%d,%d %s %llu + %u [%s]",
-		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
-		  (unsigned long long)__entry->sector,
-		  __entry->nr_sector, __entry->comm)
-);
-
 /**
  * block_bio_complete - completed all work on the block operation
  * @q: queue holding the block operation
@@ -301,11 +262,11 @@ TRACE_EVENT(block_bio_complete,
 		  __entry->nr_sector, __entry->error)
 );
 
-DECLARE_EVENT_CLASS(block_bio_merge,
+DECLARE_EVENT_CLASS(block_bio,
 
-	TP_PROTO(struct request_queue *q, struct request *rq, struct bio *bio),
+	TP_PROTO(struct bio *bio),
 
-	TP_ARGS(q, rq, bio),
+	TP_ARGS(bio),
 
 	TP_STRUCT__entry(
 		__field( dev_t, dev )
@@ -329,116 +290,63 @@ DECLARE_EVENT_CLASS(block_bio_merge,
 		  __entry->nr_sector, __entry->comm)
 );
 
+/**
+ * block_bio_bounce - used bounce buffer when processing block operation
+ * @bio: block operation
+ *
+ * A bounce buffer was used to handle the block operation @bio in @q.
+ * This occurs when hardware limitations prevent a direct transfer of
+ * data between the @bio data memory area and the IO device. Use of a
+ * bounce buffer requires extra copying of data and decreases
+ * performance.
+ */
+DEFINE_EVENT(block_bio, block_bio_bounce,
+	TP_PROTO(struct bio *bio),
+	TP_ARGS(bio)
+);
+
 /**
  * block_bio_backmerge - merging block operation to the end of an existing operation
- * @q: queue holding operation
- * @rq: request bio is being merged into
  * @bio: new block operation to merge
  *
- * Merging block request @bio to the end of an existing block request
- * in queue @q.
+ * Merging block request @bio to the end of an existing block request.
  */
-DEFINE_EVENT(block_bio_merge, block_bio_backmerge,
-	TP_PROTO(struct request_queue *q, struct request *rq, struct bio *bio),
-	TP_ARGS(q, rq, bio)
+DEFINE_EVENT(block_bio, block_bio_backmerge,
+	TP_PROTO(struct bio *bio),
+	TP_ARGS(bio)
 );
 
 /**
  * block_bio_frontmerge - merging block operation to the beginning of an existing operation
- * @q: queue holding operation
- * @rq: request bio is being merged into
  * @bio: new block operation to merge
 *
- * Merging block IO operation @bio to the beginning of an existing block
- * operation in queue @q.
+ * Merging block IO operation @bio to the beginning of an existing block request.
 */
-DEFINE_EVENT(block_bio_merge, block_bio_frontmerge,
-	TP_PROTO(struct request_queue *q, struct request *rq, struct bio *bio),
-	TP_ARGS(q, rq, bio)
+DEFINE_EVENT(block_bio, block_bio_frontmerge,
+	TP_PROTO(struct bio *bio),
+	TP_ARGS(bio)
 );
 
 /**
  * block_bio_queue - putting new block IO operation in queue
- * @q: queue holding operation
  * @bio: new block operation
 *
 * About to place the block IO operation @bio into queue @q.
 */
-TRACE_EVENT(block_bio_queue,
-
-	TP_PROTO(struct request_queue *q, struct bio *bio),
-
-	TP_ARGS(q, bio),
-
-	TP_STRUCT__entry(
-		__field( dev_t, dev )
-		__field( sector_t, sector )
-		__field( unsigned int, nr_sector )
-		__array( char, rwbs, RWBS_LEN )
-		__array( char, comm, TASK_COMM_LEN )
-	),
-
-	TP_fast_assign(
-		__entry->dev = bio_dev(bio);
-		__entry->sector = bio->bi_iter.bi_sector;
-		__entry->nr_sector = bio_sectors(bio);
-		blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
-		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
-	),
-
-	TP_printk("%d,%d %s %llu + %u [%s]",
-		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
-		  (unsigned long long)__entry->sector,
-		  __entry->nr_sector, __entry->comm)
-);
-
-DECLARE_EVENT_CLASS(block_get_rq,
-
-	TP_PROTO(struct request_queue *q, struct bio *bio, int rw),
-
-	TP_ARGS(q, bio, rw),
-
-	TP_STRUCT__entry(
-		__field( dev_t, dev )
-		__field( sector_t, sector )
-		__field( unsigned int, nr_sector )
-		__array( char, rwbs, RWBS_LEN )
-		__array( char, comm, TASK_COMM_LEN )
-	),
-
-	TP_fast_assign(
-		__entry->dev = bio ? bio_dev(bio) : 0;
-		__entry->sector = bio ? bio->bi_iter.bi_sector : 0;
-		__entry->nr_sector = bio ? bio_sectors(bio) : 0;
-		blk_fill_rwbs(__entry->rwbs,
-			      bio ? bio->bi_opf : 0, __entry->nr_sector);
-		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
-	),
-
-	TP_printk("%d,%d %s %llu + %u [%s]",
-		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
-		  (unsigned long long)__entry->sector,
-		  __entry->nr_sector, __entry->comm)
+DEFINE_EVENT(block_bio, block_bio_queue,
+	TP_PROTO(struct bio *bio),
+	TP_ARGS(bio)
 );
 
 /**
  * block_getrq - get a free request entry in queue for block IO operations
- * @q: queue for operations
  * @bio: pending block IO operation (can be %NULL)
- * @rw: low bit indicates a read (%0) or a write (%1)
 *
- * A request struct for queue @q has been allocated to handle the
- * block IO operation @bio.
+ * A request struct has been allocated to handle the block IO operation @bio.
 */
-DEFINE_EVENT(block_get_rq, block_getrq,
-	TP_PROTO(struct request_queue *q, struct bio *bio, int rw),
-	TP_ARGS(q, bio, rw)
+DEFINE_EVENT(block_bio, block_getrq,
+	TP_PROTO(struct bio *bio),
+	TP_ARGS(bio)
 );
 
 /**

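Note that the class's TP_fast_assign only ever dereferences the bio (and current), which is why the old @q and @rq arguments were safe to drop: they were never recorded. A consumer that still needs the queue can derive it from the bio; a sketch (bio_to_queue is a made-up helper, mirroring what the blktrace.c probes below do inline):

    /* Not part of the patch: recover the request_queue that the old
     * prototypes passed explicitly.  At this point in the tree a bio
     * carries its disk as bio->bi_disk; later kernels reworked this
     * into bio->bi_bdev. */
    static inline struct request_queue *bio_to_queue(struct bio *bio)
    {
            return bio->bi_disk->queue;
    }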
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c

@@ -906,10 +906,9 @@ static void blk_add_trace_bio(struct request_queue *q, struct bio *bio,
 	rcu_read_unlock();
 }
 
-static void blk_add_trace_bio_bounce(void *ignore,
-				     struct request_queue *q, struct bio *bio)
+static void blk_add_trace_bio_bounce(void *ignore, struct bio *bio)
 {
-	blk_add_trace_bio(q, bio, BLK_TA_BOUNCE, 0);
+	blk_add_trace_bio(bio->bi_disk->queue, bio, BLK_TA_BOUNCE, 0);
 }
 
 static void blk_add_trace_bio_complete(void *ignore,
@@ -919,44 +918,24 @@ static void blk_add_trace_bio_complete(void *ignore,
 			  blk_status_to_errno(bio->bi_status));
 }
 
-static void blk_add_trace_bio_backmerge(void *ignore,
-					struct request_queue *q,
-					struct request *rq,
-					struct bio *bio)
+static void blk_add_trace_bio_backmerge(void *ignore, struct bio *bio)
 {
-	blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE, 0);
+	blk_add_trace_bio(bio->bi_disk->queue, bio, BLK_TA_BACKMERGE, 0);
 }
 
-static void blk_add_trace_bio_frontmerge(void *ignore,
-					 struct request_queue *q,
-					 struct request *rq,
-					 struct bio *bio)
+static void blk_add_trace_bio_frontmerge(void *ignore, struct bio *bio)
 {
-	blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE, 0);
+	blk_add_trace_bio(bio->bi_disk->queue, bio, BLK_TA_FRONTMERGE, 0);
 }
 
-static void blk_add_trace_bio_queue(void *ignore,
-				    struct request_queue *q, struct bio *bio)
+static void blk_add_trace_bio_queue(void *ignore, struct bio *bio)
 {
-	blk_add_trace_bio(q, bio, BLK_TA_QUEUE, 0);
+	blk_add_trace_bio(bio->bi_disk->queue, bio, BLK_TA_QUEUE, 0);
 }
 
-static void blk_add_trace_getrq(void *ignore,
-				struct request_queue *q,
-				struct bio *bio, int rw)
+static void blk_add_trace_getrq(void *ignore, struct bio *bio)
 {
-	if (bio)
-		blk_add_trace_bio(q, bio, BLK_TA_GETRQ, 0);
-	else {
-		struct blk_trace *bt;
-
-		rcu_read_lock();
-		bt = rcu_dereference(q->blk_trace);
-		if (bt)
-			__blk_add_trace(bt, 0, 0, rw, 0, BLK_TA_GETRQ, 0, 0,
-					NULL, 0);
-		rcu_read_unlock();
-	}
+	blk_add_trace_bio(bio->bi_disk->queue, bio, BLK_TA_GETRQ, 0);
 }
 
 static void blk_add_trace_plug(void *ignore, struct request_queue *q)
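
For reference, the probe side of the new prototype: a sketch of a minimal out-of-tree consumer (module and function names are invented; building it also assumes the block_bio_queue tracepoint is exported for module use, which this patch does not itself guarantee):

    /* Hypothetical consumer attaching to the reworked tracepoint.  The
     * probe now receives only the bio and derives the queue from it,
     * exactly as blk_add_trace_bio_queue() does above. */
    #include <linux/module.h>
    #include <linux/blkdev.h>
    #include <trace/events/block.h>

    static void probe_bio_queue(void *ignore, struct bio *bio)
    {
            struct request_queue *q = bio->bi_disk->queue;

            pr_info("block_bio_queue: %d,%d sector %llu + %u on q %p\n",
                    MAJOR(bio_dev(bio)), MINOR(bio_dev(bio)),
                    (unsigned long long)bio->bi_iter.bi_sector,
                    bio_sectors(bio), q);
    }

    static int __init bio_probe_init(void)
    {
            return register_trace_block_bio_queue(probe_bio_queue, NULL);
    }

    static void __exit bio_probe_exit(void)
    {
            unregister_trace_block_bio_queue(probe_bio_queue, NULL);
            tracepoint_synchronize_unregister();
    }

    module_init(bio_probe_init);
    module_exit(bio_probe_exit);
    MODULE_LICENSE("GPL");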