block: simplify blk_mq_plug
Drop the unused q argument, and invert the check to move the exception
into a branch and the regular path as the normal return.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Chaitanya Kulkarni <kch@nvidia.com>
Reviewed-by: Damien Le Moal <damien.lemoal@opensource.wdc.com>
Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
Link: https://lore.kernel.org/r/20220706070350.1703384-5-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent edd1dbc83b
commit 6deacb3bfa
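As a side note, the "invert the check" shape the message describes is a common refactor: handle the exceptional case with an early return and let the regular result be the plain return at the end. A minimal, self-contained C sketch of the before/after shapes (hypothetical names, not kernel code):

#include <stdio.h>

/* Before: regular path buried in a branch, exception as the tail. */
static const char *pick_before(int is_exception)
{
        if (!is_exception)
                return "regular";       /* normal result inside the branch */
        return NULL;                    /* exception falls through to the end */
}

/* After: inverted check, exception handled first, regular path last --
 * the same shape blk_mq_plug() takes in the diff below. */
static const char *pick_after(int is_exception)
{
        if (is_exception)               /* exception moved into a branch */
                return NULL;
        return "regular";               /* regular path as the normal return */
}

int main(void)
{
        printf("%s\n", pick_after(0));                    /* prints "regular" */
        printf("%d\n", pick_before(1) == pick_after(1));  /* 1: same behavior */
        return 0;
}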
block/blk-core.c
@@ -719,7 +719,7 @@ void submit_bio_noacct(struct bio *bio)
 
         might_sleep();
 
-        plug = blk_mq_plug(q, bio);
+        plug = blk_mq_plug(bio);
         if (plug && plug->nowait)
                 bio->bi_opf |= REQ_NOWAIT;
 
block/blk-merge.c
@@ -1051,7 +1051,7 @@ bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
         struct blk_plug *plug;
         struct request *rq;
 
-        plug = blk_mq_plug(q, bio);
+        plug = blk_mq_plug(bio);
         if (!plug || rq_list_empty(plug->mq_list))
                 return false;
 
block/blk-mq.c
@@ -2808,7 +2808,7 @@ static void bio_set_ioprio(struct bio *bio)
 void blk_mq_submit_bio(struct bio *bio)
 {
         struct request_queue *q = bdev_get_queue(bio->bi_bdev);
-        struct blk_plug *plug = blk_mq_plug(q, bio);
+        struct blk_plug *plug = blk_mq_plug(bio);
         const int is_sync = op_is_sync(bio->bi_opf);
         struct request *rq;
         unsigned int nr_segs = 1;
block/blk-mq.h
@@ -294,7 +294,6 @@ static inline void blk_mq_clear_mq_map(struct blk_mq_queue_map *qmap)
 
 /*
  * blk_mq_plug() - Get caller context plug
- * @q: request queue
  * @bio : the bio being submitted by the caller context
  *
  * Plugging, by design, may delay the insertion of BIOs into the elevator in
@@ -305,23 +304,22 @@ static inline void blk_mq_clear_mq_map(struct blk_mq_queue_map *qmap)
  * order. While this is not a problem with regular block devices, this ordering
  * change can cause write BIO failures with zoned block devices as these
  * require sequential write patterns to zones. Prevent this from happening by
- * ignoring the plug state of a BIO issuing context if the target request queue
- * is for a zoned block device and the BIO to plug is a write operation.
+ * ignoring the plug state of a BIO issuing context if it is for a zoned block
+ * device and the BIO to plug is a write operation.
  *
  * Return current->plug if the bio can be plugged and NULL otherwise
  */
-static inline struct blk_plug *blk_mq_plug(struct request_queue *q,
-                                           struct bio *bio)
+static inline struct blk_plug *blk_mq_plug( struct bio *bio)
 {
+        /* Zoned block device write operation case: do not plug the BIO */
+        if (bdev_is_zoned(bio->bi_bdev) && op_is_write(bio_op(bio)))
+                return NULL;
+
         /*
          * For regular block devices or read operations, use the context plug
          * which may be NULL if blk_start_plug() was not executed.
         */
-        if (!bdev_is_zoned(bio->bi_bdev) || !op_is_write(bio_op(bio)))
-                return current->plug;
-
-        /* Zoned block device write operation case: do not plug the BIO */
-        return NULL;
+        return current->plug;
 }
 
 /* Free all requests on the list */
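Pieced together from the two blk-mq.h hunks above, the helper reads as follows after the patch (quoted for convenience; the stray space in the parameter list is as committed):

static inline struct blk_plug *blk_mq_plug( struct bio *bio)
{
        /* Zoned block device write operation case: do not plug the BIO */
        if (bdev_is_zoned(bio->bi_bdev) && op_is_write(bio_op(bio)))
                return NULL;

        /*
         * For regular block devices or read operations, use the context plug
         * which may be NULL if blk_start_plug() was not executed.
         */
        return current->plug;
}

With the request queue argument gone, callers only need the bio, which already carries its block device via bio->bi_bdev; that is what lets all three call sites above drop q.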