Mirror of https://mirrors.bfsu.edu.cn/git/linux.git, synced 2024-11-23 20:24:12 +08:00
block: move blk_account_io_{start,done} to blk-mq.c
These are only used for request based I/O, so move them where they are
used.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
Link: https://lore.kernel.org/r/20211117061404.331732-9-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent f2b8f3ce98
commit 450b7879e3
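The two helpers being relocated implement a small accounting pattern: record when a request starts, then on completion count one more I/O for its operation group and add the elapsed service time. As a rough, stand-alone illustration of that pattern (all types and function names below are invented for the sketch; this is plain user-space C, not the kernel's part_stat machinery):

/*
 * Stand-alone sketch of the accounting done by
 * __blk_account_io_start()/__blk_account_io_done(): remember when a
 * request was issued, and on completion bump the per-operation I/O
 * count and accumulate the time the request spent in flight.
 * Illustrative names only; none of this is kernel API.
 */
#include <stdio.h>
#include <stdint.h>
#include <time.h>

enum stat_group { STAT_READ, STAT_WRITE, NR_STAT_GROUPS };

struct part_stats {
	uint64_t ios[NR_STAT_GROUPS];	/* completed I/Os per group */
	uint64_t nsecs[NR_STAT_GROUPS];	/* total service time per group */
};

struct fake_request {
	enum stat_group sgrp;
	uint64_t start_time_ns;
};

static uint64_t now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

/* "start" side: remember when the request was issued */
static void account_io_start(struct fake_request *rq)
{
	rq->start_time_ns = now_ns();
}

/* "done" side: one more I/O for this group, plus its elapsed time */
static void account_io_done(struct part_stats *stats, struct fake_request *rq)
{
	uint64_t now = now_ns();

	stats->ios[rq->sgrp]++;
	stats->nsecs[rq->sgrp] += now - rq->start_time_ns;
}

int main(void)
{
	struct part_stats stats = { 0 };
	struct fake_request rq = { .sgrp = STAT_WRITE };

	account_io_start(&rq);
	/* ... request is serviced here ... */
	account_io_done(&stats, &rq);

	printf("writes: %llu, nsecs: %llu\n",
	       (unsigned long long)stats.ios[STAT_WRITE],
	       (unsigned long long)stats.nsecs[STAT_WRITE]);
	return 0;
}

In the kernel the same idea is wrapped in part_stat_lock()/part_stat_unlock() and per-CPU counters; the sketch collapses that into one plain struct for readability.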
block/blk-core.c

@@ -1064,8 +1064,7 @@ int iocb_bio_iopoll(struct kiocb *kiocb, struct io_comp_batch *iob,
 }
 EXPORT_SYMBOL_GPL(iocb_bio_iopoll);
 
-static void update_io_ticks(struct block_device *part, unsigned long now,
-		bool end)
+void update_io_ticks(struct block_device *part, unsigned long now, bool end)
 {
 	unsigned long stamp;
 again:
@@ -1080,30 +1079,6 @@ again:
 	}
 }
 
-void __blk_account_io_done(struct request *req, u64 now)
-{
-	const int sgrp = op_stat_group(req_op(req));
-
-	part_stat_lock();
-	update_io_ticks(req->part, jiffies, true);
-	part_stat_inc(req->part, ios[sgrp]);
-	part_stat_add(req->part, nsecs[sgrp], now - req->start_time_ns);
-	part_stat_unlock();
-}
-
-void __blk_account_io_start(struct request *rq)
-{
-	/* passthrough requests can hold bios that do not have ->bi_bdev set */
-	if (rq->bio && rq->bio->bi_bdev)
-		rq->part = rq->bio->bi_bdev;
-	else
-		rq->part = rq->rq_disk->part0;
-
-	part_stat_lock();
-	update_io_ticks(rq->part, jiffies, false);
-	part_stat_unlock();
-}
-
 static unsigned long __part_start_io_acct(struct block_device *part,
 					  unsigned int sectors, unsigned int op)
 {
block/blk-mq.c

@@ -809,6 +809,48 @@ bool blk_update_request(struct request *req, blk_status_t error,
 }
 EXPORT_SYMBOL_GPL(blk_update_request);
 
+static void __blk_account_io_done(struct request *req, u64 now)
+{
+	const int sgrp = op_stat_group(req_op(req));
+
+	part_stat_lock();
+	update_io_ticks(req->part, jiffies, true);
+	part_stat_inc(req->part, ios[sgrp]);
+	part_stat_add(req->part, nsecs[sgrp], now - req->start_time_ns);
+	part_stat_unlock();
+}
+
+static inline void blk_account_io_done(struct request *req, u64 now)
+{
+	/*
+	 * Account IO completion.  flush_rq isn't accounted as a
+	 * normal IO on queueing nor completion.  Accounting the
+	 * containing request is enough.
+	 */
+	if (blk_do_io_stat(req) && req->part &&
+	    !(req->rq_flags & RQF_FLUSH_SEQ))
+		__blk_account_io_done(req, now);
+}
+
+static void __blk_account_io_start(struct request *rq)
+{
+	/* passthrough requests can hold bios that do not have ->bi_bdev set */
+	if (rq->bio && rq->bio->bi_bdev)
+		rq->part = rq->bio->bi_bdev;
+	else
+		rq->part = rq->rq_disk->part0;
+
+	part_stat_lock();
+	update_io_ticks(rq->part, jiffies, false);
+	part_stat_unlock();
+}
+
+static inline void blk_account_io_start(struct request *req)
+{
+	if (blk_do_io_stat(req))
+		__blk_account_io_start(req);
+}
+
 static inline void __blk_mq_end_request_acct(struct request *rq, u64 now)
 {
 	if (rq->rq_flags & RQF_STATS) {
block/blk.h (21 changed lines)
@@ -257,9 +257,6 @@ bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
 bool blk_bio_list_merge(struct request_queue *q, struct list_head *list,
 			struct bio *bio, unsigned int nr_segs);
 
-void __blk_account_io_start(struct request *req);
-void __blk_account_io_done(struct request *req, u64 now);
-
 /*
  * Plug flush limits
  */
@@ -350,23 +347,7 @@ static inline bool blk_do_io_stat(struct request *rq)
 	return (rq->rq_flags & RQF_IO_STAT) && rq->rq_disk;
 }
 
-static inline void blk_account_io_done(struct request *req, u64 now)
-{
-	/*
-	 * Account IO completion.  flush_rq isn't accounted as a
-	 * normal IO on queueing nor completion.  Accounting the
-	 * containing request is enough.
-	 */
-	if (blk_do_io_stat(req) && req->part &&
-	    !(req->rq_flags & RQF_FLUSH_SEQ))
-		__blk_account_io_done(req, now);
-}
-
-static inline void blk_account_io_start(struct request *req)
-{
-	if (blk_do_io_stat(req))
-		__blk_account_io_start(req);
-}
+void update_io_ticks(struct block_device *part, unsigned long now, bool end);
 
 static inline void req_set_nomerge(struct request_queue *q, struct request *req)
 {