blk-mq: open code __blk_mq_alloc_request in blk_mq_alloc_request_hctx
blk_mq_alloc_request_hctx is only used for NVMeoF connect commands, so tailor it to the specific requirements, and don't bother the general fast path code with its special twinkles.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Ming Lei <ming.lei@redhat.com>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Reviewed-by: Daniel Wagner <dwagner@suse.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
This commit is contained in:
parent
766473681c
commit
600c3b0cea
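For context before the diff, here is a minimal sketch (not part of this patch) of how a fabrics connect path is expected to drive blk_mq_alloc_request_hctx() after this change. The wrapper name connect_rq_for_queue() and the REQ_OP_DRV_OUT choice are illustrative assumptions; the real caller lives in the NVMe host driver.

#include <linux/blk-mq.h>

/*
 * Illustrative sketch only: the connect command must be allocated on the
 * hardware queue it is setting up, and the caller never sleeps waiting
 * for a tag, so it always passes BLK_MQ_REQ_NOWAIT (typically with a
 * reserved tag).  That is the only case blk_mq_alloc_request_hctx() has
 * to support, which is why it can be open coded instead of sharing the
 * general fast path.
 */
static struct request *connect_rq_for_queue(struct request_queue *q,
		unsigned int hctx_idx)
{
	return blk_mq_alloc_request_hctx(q, REQ_OP_DRV_OUT,
			BLK_MQ_REQ_NOWAIT | BLK_MQ_REQ_RESERVED, hctx_idx);
}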
@@ -351,21 +351,13 @@ static struct request *__blk_mq_alloc_request(struct blk_mq_alloc_data *data)
 {
 	struct request_queue *q = data->q;
 	struct elevator_queue *e = q->elevator;
-	unsigned int tag;
-	bool clear_ctx_on_error = false;
 	u64 alloc_time_ns = 0;
+	unsigned int tag;
 
 	/* alloc_time includes depth and tag waits */
 	if (blk_queue_rq_alloc_time(q))
 		alloc_time_ns = ktime_get_ns();
 
-	if (likely(!data->ctx)) {
-		data->ctx = blk_mq_get_ctx(q);
-		clear_ctx_on_error = true;
-	}
-	if (likely(!data->hctx))
-		data->hctx = blk_mq_map_queue(q, data->cmd_flags,
-						data->ctx);
 	if (data->cmd_flags & REQ_NOWAIT)
 		data->flags |= BLK_MQ_REQ_NOWAIT;
 
@@ -381,17 +373,16 @@ static struct request *__blk_mq_alloc_request(struct blk_mq_alloc_data *data)
 		    e->type->ops.limit_depth &&
 		    !(data->flags & BLK_MQ_REQ_RESERVED))
 			e->type->ops.limit_depth(data->cmd_flags, data);
-	} else {
-		blk_mq_tag_busy(data->hctx);
 	}
 
+	data->ctx = blk_mq_get_ctx(q);
+	data->hctx = blk_mq_map_queue(q, data->cmd_flags, data->ctx);
+	if (!(data->flags & BLK_MQ_REQ_INTERNAL))
+		blk_mq_tag_busy(data->hctx);
+
 	tag = blk_mq_get_tag(data);
-	if (tag == BLK_MQ_NO_TAG) {
-		if (clear_ctx_on_error)
-			data->ctx = NULL;
+	if (tag == BLK_MQ_NO_TAG)
 		return NULL;
-	}
-
 	return blk_mq_rq_ctx_init(data, tag, alloc_time_ns);
 }
 
@@ -431,17 +422,22 @@ struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
 		.flags		= flags,
 		.cmd_flags	= op,
 	};
-	struct request *rq;
+	u64 alloc_time_ns = 0;
 	unsigned int cpu;
+	unsigned int tag;
 	int ret;
 
+	/* alloc_time includes depth and tag waits */
+	if (blk_queue_rq_alloc_time(q))
+		alloc_time_ns = ktime_get_ns();
+
 	/*
 	 * If the tag allocator sleeps we could get an allocation for a
 	 * different hardware context. No need to complicate the low level
 	 * allocator for this for the rare use case of a command tied to
 	 * a specific queue.
 	 */
-	if (WARN_ON_ONCE(!(flags & BLK_MQ_REQ_NOWAIT)))
+	if (WARN_ON_ONCE(!(flags & (BLK_MQ_REQ_NOWAIT | BLK_MQ_REQ_RESERVED))))
 		return ERR_PTR(-EINVAL);
 
 	if (hctx_idx >= q->nr_hw_queues)
@@ -462,11 +458,17 @@ struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
 	cpu = cpumask_first_and(data.hctx->cpumask, cpu_online_mask);
 	data.ctx = __blk_mq_get_ctx(q, cpu);
 
+	if (q->elevator)
+		data.flags |= BLK_MQ_REQ_INTERNAL;
+	else
+		blk_mq_tag_busy(data.hctx);
+
 	ret = -EWOULDBLOCK;
-	rq = __blk_mq_alloc_request(&data);
-	if (!rq)
+	tag = blk_mq_get_tag(&data);
+	if (tag == BLK_MQ_NO_TAG)
 		goto out_queue_exit;
-	return rq;
+	return blk_mq_rq_ctx_init(&data, tag, alloc_time_ns);
+
 out_queue_exit:
 	blk_queue_exit(q);
 	return ERR_PTR(ret);