block: make rq sector size accessible for block stats
Currently rq->data_len is decreased by partial completion and zeroed
by full completion, so by the time blk_stat_add() is invoked, data_len
is zero and poll_cb never collects any samples, because
blk_mq_poll_stats_bkt() returns -1 when data_len is zero.
We could move blk_stat_add() back to __blk_mq_complete_request(),
but that would defeat the effort to call ktime_get_ns() only once.
Instead, reuse the throtl_size field (renamed to stats_sectors, since
it now serves both block stats and block throttling) and adjust the
logic in blk_mq_poll_stats_bkt() accordingly.
Fixes: 4bc6339a58 ("block: move blk_stat_add() to __blk_mq_end_request()")
Tested-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Hou Tao <houtao1@huawei.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
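Why the bucket formula can drop the "- 9": a sector is 512 bytes, so for
any request that is a whole number of sectors, sectors == bytes >> 9 and
therefore ilog2(bytes) - 9 == ilog2(sectors). A minimal user-space sketch
of this equivalence (not kernel code; ilog2_u() stands in for the
kernel's ilog2()):

#include <assert.h>
#include <stdio.h>

/* Floor of log2 for v > 0; same result as the kernel's ilog2(). */
static int ilog2_u(unsigned int v)
{
	int l = -1;

	while (v) {
		v >>= 1;
		l++;
	}
	return l;
}

int main(void)
{
	for (unsigned int bytes = 512; bytes <= (1u << 20); bytes <<= 1) {
		unsigned int sectors = bytes >> 9;

		/* the old and new bucket offsets agree */
		assert(ilog2_u(bytes) - 9 == ilog2_u(sectors));
		printf("%7u bytes -> %4u sectors -> bucket offset %d\n",
		       bytes, sectors, 2 * ilog2_u(sectors));
	}
	return 0;
}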
parent 89f3b6d62f
commit 3d24430694
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -44,12 +44,12 @@ static void blk_mq_poll_stats_fn(struct blk_stat_callback *cb);
 
 static int blk_mq_poll_stats_bkt(const struct request *rq)
 {
-	int ddir, bytes, bucket;
+	int ddir, sectors, bucket;
 
 	ddir = rq_data_dir(rq);
-	bytes = blk_rq_bytes(rq);
+	sectors = blk_rq_stats_sectors(rq);
 
-	bucket = ddir + 2*(ilog2(bytes) - 9);
+	bucket = ddir + 2 * ilog2(sectors);
 
 	if (bucket < 0)
 		return -1;
@@ -333,6 +333,7 @@ static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
 	else
 		rq->start_time_ns = 0;
 	rq->io_start_time_ns = 0;
+	rq->stats_sectors = 0;
 	rq->nr_phys_segments = 0;
 #if defined(CONFIG_BLK_DEV_INTEGRITY)
 	rq->nr_integrity_segments = 0;
@@ -681,9 +682,7 @@ void blk_mq_start_request(struct request *rq)
 
 	if (test_bit(QUEUE_FLAG_STATS, &q->queue_flags)) {
 		rq->io_start_time_ns = ktime_get_ns();
-#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
-		rq->throtl_size = blk_rq_sectors(rq);
-#endif
+		rq->stats_sectors = blk_rq_sectors(rq);
 		rq->rq_flags |= RQF_STATS;
 		rq_qos_issue(q, rq);
 	}
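Reduced to a toy example, the shape of the fix is a snapshot-at-issue
pattern (the struct and function names below are illustrative, not the
kernel's): record the size once when the request is started, and let
completion-time accounting read the snapshot instead of a field that
completion mutates.

/* Toy sketch of the snapshot-at-issue pattern; not kernel structures. */
struct toy_req {
	unsigned int data_len;       /* decremented/zeroed as completion progresses */
	unsigned int stats_sectors;  /* written once at issue, never cleared */
};

static void toy_issue(struct toy_req *rq, unsigned int sectors)
{
	rq->data_len = sectors << 9;
	rq->stats_sectors = sectors; /* snapshot for stats/throttle */
}

static void toy_complete(struct toy_req *rq)
{
	rq->data_len = 0;            /* mirrors what completion does to the rq */
	/* stats code may still read rq->stats_sectors here */
}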
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -2248,7 +2248,8 @@ void blk_throtl_stat_add(struct request *rq, u64 time_ns)
 	struct request_queue *q = rq->q;
 	struct throtl_data *td = q->td;
 
-	throtl_track_latency(td, rq->throtl_size, req_op(rq), time_ns >> 10);
+	throtl_track_latency(td, blk_rq_stats_sectors(rq), req_op(rq),
+			     time_ns >> 10);
 }
 
 void blk_throtl_bio_endio(struct bio *bio)
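An aside on units, unchanged by this patch: time_ns >> 10 converts
nanoseconds to approximately microseconds (dividing by 1024 rather than
1000, about 2.3% low), which avoids a 64-bit division in the completion
path. A stand-alone illustration (the helper name is assumed, not from
the patch):

#include <stdint.h>
#include <stdio.h>

/* ns -> approximate us via shift, as in "time_ns >> 10" above. */
static inline uint64_t ns_to_approx_us(uint64_t ns)
{
	return ns >> 10;	/* /1024; exact /1000 would need a divide */
}

int main(void)
{
	uint64_t ns = 1000000;	/* 1 ms */

	printf("%llu ~us (exact: %llu us)\n",
	       (unsigned long long)ns_to_approx_us(ns),
	       (unsigned long long)(ns / 1000));
	return 0;
}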
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -206,9 +206,12 @@ struct request {
 #ifdef CONFIG_BLK_WBT
 	unsigned short wbt_flags;
 #endif
-#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
-	unsigned short throtl_size;
-#endif
+	/*
+	 * rq sectors used for blk stats. It has the same value
+	 * as blk_rq_sectors(rq), except that it is never zeroed
+	 * by completion.
+	 */
+	unsigned short stats_sectors;
 
 	/*
 	 * Number of scatter-gather DMA addr+len pairs after
@@ -917,6 +920,7 @@ static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
  * blk_rq_err_bytes()		: bytes left till the next error boundary
  * blk_rq_sectors()		: sectors left in the entire request
  * blk_rq_cur_sectors()		: sectors left in the current segment
+ * blk_rq_stats_sectors()	: sectors of the entire request used for stats
  */
 static inline sector_t blk_rq_pos(const struct request *rq)
 {
@@ -945,6 +949,11 @@ static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
 	return blk_rq_cur_bytes(rq) >> SECTOR_SHIFT;
 }
 
+static inline unsigned int blk_rq_stats_sectors(const struct request *rq)
+{
+	return rq->stats_sectors;
+}
+
 #ifdef CONFIG_BLK_DEV_ZONED
 static inline unsigned int blk_rq_zone_no(struct request *rq)
 {