block: attempt direct issue of plug list
If we have just one queue type in the plug list, then we can extend our
direct issue to cover a full plug list as well. This allows sending a
batch of requests for direct issue, which is more efficient than doing
one-at-a-time kind of issue.

Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent bc490f8173
commit dc5fc361d8
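For context, the batch issue added by this commit is reached through the existing plugging API: a submitter opens a plug, queues a few requests, and the flush triggered by blk_finish_plug() ends up in blk_mq_flush_plug_list(), which now hands the whole plug list to the new blk_mq_plug_issue_direct() when the plug holds a single queue type and no elevator. Below is a minimal caller-side sketch, assuming only the long-standing blk_start_plug()/submit_bio()/blk_finish_plug() interfaces; the example_submit_pair() helper and its bios are hypothetical and not part of this patch.

#include <linux/bio.h>
#include <linux/blkdev.h>

/*
 * Hypothetical submitter: batch two bios under one plug so the plug
 * flush can attempt the new direct issue of the whole list.
 */
static void example_submit_pair(struct bio *a, struct bio *b)
{
        struct blk_plug plug;

        blk_start_plug(&plug);
        submit_bio(a);          /* plugged requests accumulate on plug->mq_list */
        submit_bio(b);
        blk_finish_plug(&plug); /* flush: blk_mq_flush_plug_list() may now
                                 * issue the whole batch directly */
}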
block/blk-core.c
@@ -1555,6 +1555,7 @@ void blk_start_plug_nr_ios(struct blk_plug *plug, unsigned short nr_ios)
         plug->nr_ios = min_t(unsigned short, nr_ios, BLK_MAX_REQUEST_COUNT);
         plug->rq_count = 0;
         plug->multiple_queues = false;
+        plug->has_elevator = false;
         plug->nowait = false;
         INIT_LIST_HEAD(&plug->cb_list);
 }
block/blk-mq.c
@@ -2149,6 +2149,58 @@ void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
         spin_unlock(&ctx->lock);
 }
 
+static void blk_mq_commit_rqs(struct blk_mq_hw_ctx *hctx, int *queued,
+                bool from_schedule)
+{
+        if (hctx->queue->mq_ops->commit_rqs) {
+                trace_block_unplug(hctx->queue, *queued, !from_schedule);
+                hctx->queue->mq_ops->commit_rqs(hctx);
+        }
+        *queued = 0;
+}
+
+static void blk_mq_plug_issue_direct(struct blk_plug *plug, bool from_schedule)
+{
+        struct blk_mq_hw_ctx *hctx = NULL;
+        struct request *rq;
+        int queued = 0;
+        int errors = 0;
+
+        while ((rq = rq_list_pop(&plug->mq_list))) {
+                bool last = rq_list_empty(plug->mq_list);
+                blk_status_t ret;
+
+                if (hctx != rq->mq_hctx) {
+                        if (hctx)
+                                blk_mq_commit_rqs(hctx, &queued, from_schedule);
+                        hctx = rq->mq_hctx;
+                }
+
+                ret = blk_mq_request_issue_directly(rq, last);
+                switch (ret) {
+                case BLK_STS_OK:
+                        queued++;
+                        break;
+                case BLK_STS_RESOURCE:
+                case BLK_STS_DEV_RESOURCE:
+                        blk_mq_request_bypass_insert(rq, false, last);
+                        blk_mq_commit_rqs(hctx, &queued, from_schedule);
+                        return;
+                default:
+                        blk_mq_end_request(rq, ret);
+                        errors++;
+                        break;
+                }
+        }
+
+        /*
+         * If we didn't flush the entire list, we could have told the driver
+         * there was more coming, but that turned out to be a lie.
+         */
+        if (errors)
+                blk_mq_commit_rqs(hctx, &queued, from_schedule);
+}
+
 void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
 {
         struct blk_mq_hw_ctx *this_hctx;
@@ -2160,6 +2212,12 @@ void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
                 return;
         plug->rq_count = 0;
 
+        if (!plug->multiple_queues && !plug->has_elevator) {
+                blk_mq_plug_issue_direct(plug, from_schedule);
+                if (rq_list_empty(plug->mq_list))
+                        return;
+        }
+
         this_hctx = NULL;
         this_ctx = NULL;
         depth = 0;
@@ -2376,6 +2434,8 @@ static void blk_add_rq_to_plug(struct blk_plug *plug, struct request *rq)
                 if (nxt && nxt->q != rq->q)
                         plug->multiple_queues = true;
         }
+        if (!plug->has_elevator && (rq->rq_flags & RQF_ELV))
+                plug->has_elevator = true;
         rq->rq_next = NULL;
         rq_list_add(&plug->mq_list, rq);
         plug->rq_count++;
include/linux/blkdev.h
@@ -737,6 +737,7 @@ struct blk_plug {
         unsigned short rq_count;
 
         bool multiple_queues;
+        bool has_elevator;
         bool nowait;
 
         struct list_head cb_list; /* md requires an unplug callback */