block: Call .limit_depth() after .hctx has been set

commit 6259151c04 upstream.

Call .limit_depth() after data->hctx has been set such that data->hctx can
be used in .limit_depth() implementations.
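
For illustration only (not part of this patch): a hedged sketch of a scheduler
.limit_depth() implementation that relies on data->hctx being valid at the time
of the call. The callback name and the depth heuristic are invented for this
example; before this change, data->hctx was still NULL when the callback ran,
so code like this would have dereferenced a NULL pointer.

	/* Hypothetical example; not taken from any in-tree I/O scheduler. */
	static void example_limit_depth(blk_opf_t opf,
					struct blk_mq_alloc_data *data)
	{
		struct blk_mq_hw_ctx *hctx = data->hctx; /* valid only after this patch */

		/* Do not throttle synchronous reads. */
		if (op_is_sync(opf) && !op_is_write(opf))
			return;

		/* Cap async requests to a quarter of the queue depth. */
		data->shallow_depth = max(1UL, hctx->queue->nr_requests / 4);
	}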

Cc: Christoph Hellwig <hch@lst.de>
Cc: Damien Le Moal <dlemoal@kernel.org>
Cc: Zhiguo Niu <zhiguo.niu@unisoc.com>
Fixes: 07757588e5 ("block/mq-deadline: Reserve 25% of scheduler tags for synchronous requests")
Signed-off-by: Bart Van Assche <bvanassche@acm.org>
Tested-by: Zhiguo Niu <zhiguo.niu@unisoc.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Link: https://lore.kernel.org/r/20240509170149.7639-2-bvanassche@acm.org
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -439,6 +439,7 @@ __blk_mq_alloc_requests_batch(struct blk_mq_alloc_data *data,
 
 static struct request *__blk_mq_alloc_requests(struct blk_mq_alloc_data *data)
 {
+	void (*limit_depth)(blk_opf_t, struct blk_mq_alloc_data *) = NULL;
 	struct request_queue *q = data->q;
 	u64 alloc_time_ns = 0;
 	struct request *rq;
@@ -465,7 +466,7 @@ static struct request *__blk_mq_alloc_requests(struct blk_mq_alloc_data *data)
 		    !blk_op_is_passthrough(data->cmd_flags) &&
 		    e->type->ops.limit_depth &&
 		    !(data->flags & BLK_MQ_REQ_RESERVED))
-			e->type->ops.limit_depth(data->cmd_flags, data);
+			limit_depth = e->type->ops.limit_depth;
 	}
 
 retry:
@@ -477,6 +478,9 @@ retry:
 	if (data->flags & BLK_MQ_REQ_RESERVED)
 		data->rq_flags |= RQF_RESV;
 
+	if (limit_depth)
+		limit_depth(data->cmd_flags, data);
+
 	/*
 	 * Try batched alloc if we want more than 1 tag.
 	 */
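
For context, an abridged sketch (paraphrased from block/blk-mq.c around the
retry: label; the exact surrounding lines vary by stable tree) of why the
deferred call now observes a valid hardware context: data->hctx is assigned
right after the retry: label, ahead of the new call site.

	retry:
		data->ctx = blk_mq_get_ctx(q);
		data->hctx = blk_mq_map_queue(q, data->cmd_flags, data->ctx);
		...
		if (data->flags & BLK_MQ_REQ_RESERVED)
			data->rq_flags |= RQF_RESV;

		/* New call site added by this patch: data->hctx is set by now. */
		if (limit_depth)
			limit_depth(data->cmd_flags, data);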