mirror of
https://mirrors.bfsu.edu.cn/git/linux.git
synced 2025-01-19 04:14:49 +08:00
block, bfq: make bfq_has_work() more accurate
bfq_has_work() is currently using busy_queues, which is not accurate because a bfq_queue being busy doesn't mean that it has requests. Since bfqd already has a counter 'queued' to record how many requests are in bfq, use it instead of busy_queues. Note that bfq_has_work() can be called without 'bfqd->lock' held, thus the lock can't be held in bfq_has_work() to protect 'bfqd->queued'. Signed-off-by: Yu Kuai <yukuai3@huawei.com> Reviewed-by: Jan Kara <jack@suse.cz> Link: https://lore.kernel.org/r/20220513023507.2625717-3-yukuai3@huawei.com Signed-off-by: Jens Axboe <axboe@kernel.dk>
This commit is contained in:
parent
181490d532
commit
ddc25c86b4
@ -2204,7 +2204,11 @@ static void bfq_add_request(struct request *rq)
|
||||
|
||||
bfq_log_bfqq(bfqd, bfqq, "add_request %d", rq_is_sync(rq));
|
||||
bfqq->queued[rq_is_sync(rq)]++;
|
||||
bfqd->queued++;
|
||||
/*
|
||||
* Updating of 'bfqd->queued' is protected by 'bfqd->lock', however, it
|
||||
* may be read without holding the lock in bfq_has_work().
|
||||
*/
|
||||
WRITE_ONCE(bfqd->queued, bfqd->queued + 1);
|
||||
|
||||
if (RB_EMPTY_ROOT(&bfqq->sort_list) && bfq_bfqq_sync(bfqq)) {
|
||||
bfq_check_waker(bfqd, bfqq, now_ns);
|
||||
@ -2396,7 +2400,11 @@ static void bfq_remove_request(struct request_queue *q,
|
||||
if (rq->queuelist.prev != &rq->queuelist)
|
||||
list_del_init(&rq->queuelist);
|
||||
bfqq->queued[sync]--;
|
||||
bfqd->queued--;
|
||||
/*
|
||||
* Updating of 'bfqd->queued' is protected by 'bfqd->lock', however, it
|
||||
* may be read without holding the lock in bfq_has_work().
|
||||
*/
|
||||
WRITE_ONCE(bfqd->queued, bfqd->queued - 1);
|
||||
elv_rb_del(&bfqq->sort_list, rq);
|
||||
|
||||
elv_rqhash_del(q, rq);
|
||||
@ -5057,11 +5065,11 @@ static bool bfq_has_work(struct blk_mq_hw_ctx *hctx)
|
||||
struct bfq_data *bfqd = hctx->queue->elevator->elevator_data;
|
||||
|
||||
/*
|
||||
* Avoiding lock: a race on bfqd->busy_queues should cause at
|
||||
* Avoiding lock: a race on bfqd->queued should cause at
|
||||
* most a call to dispatch for nothing
|
||||
*/
|
||||
return !list_empty_careful(&bfqd->dispatch) ||
|
||||
bfq_tot_busy_queues(bfqd) > 0;
|
||||
READ_ONCE(bfqd->queued);
|
||||
}
|
||||
|
||||
static struct request *__bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
|
||||
|
Loading…
Reference in New Issue
Block a user