blk-mq: remove __blk_mq_run_hw_queue
__blk_mq_run_hw_queue just contains a WARN_ON_ONCE for calls from
interrupt context and a blk_mq_run_dispatch_ops-protected call to
blk_mq_sched_dispatch_requests.  Open code the call to
blk_mq_sched_dispatch_requests in both callers, and move the WARN_ON_ONCE
to blk_mq_run_hw_queue where it can be extended to all !async calls,
while the other call is from workqueue context and thus obviously does
not need the assert.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Damien Le Moal <dlemoal@kernel.org>
Link: https://lore.kernel.org/r/20230413060651.694656-6-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 1aa8d875b5
commit 4d5bba5bee
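The effect for callers is easiest to see from the API side: after this patch, blk_mq_run_hw_queue(hctx, async) carries the interrupt-context assertion for every synchronous (!async) run. Below is a minimal caller-side sketch; the mydrv_* function names are hypothetical and not part of this patch.

#include <linux/blk-mq.h>

/*
 * Hypothetical driver completion path: this may execute in hard-IRQ
 * context, so the hardware queue must be kicked asynchronously.  After
 * this patch, blk_mq_run_hw_queue(hctx, false) from interrupt context
 * would trip WARN_ON_ONCE(!async && in_interrupt()).
 */
static void mydrv_complete_and_rerun(struct blk_mq_hw_ctx *hctx)
{
	/* Defer the actual dispatch to the hctx's run_work. */
	blk_mq_run_hw_queue(hctx, true);
}

/*
 * Hypothetical submission-side path running in process context with
 * interrupts enabled: a synchronous run is fine and dispatches inline.
 */
static void mydrv_kick_queue_sync(struct blk_mq_hw_ctx *hctx)
{
	blk_mq_run_hw_queue(hctx, false);
}

The workqueue handler blk_mq_run_work_fn does not need the assert at all, because it always runs in process context; that is why the check only needs to cover the !async path.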
block/blk-mq.c

@@ -2138,24 +2138,6 @@ out:
 	return true;
 }
 
-/**
- * __blk_mq_run_hw_queue - Run a hardware queue.
- * @hctx: Pointer to the hardware queue to run.
- *
- * Send pending requests to the hardware.
- */
-static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
-{
-	/*
-	 * We can't run the queue inline with ints disabled. Ensure that
-	 * we catch bad users of this early.
-	 */
-	WARN_ON_ONCE(in_interrupt());
-
-	blk_mq_run_dispatch_ops(hctx->queue,
-			blk_mq_sched_dispatch_requests(hctx));
-}
-
 static inline int blk_mq_first_mapped_cpu(struct blk_mq_hw_ctx *hctx)
 {
 	int cpu = cpumask_first_and(hctx->cpumask, cpu_online_mask);
@@ -2240,6 +2222,11 @@ void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
 {
 	bool need_run;
 
+	/*
+	 * We can't run the queue inline with interrupts disabled.
+	 */
+	WARN_ON_ONCE(!async && in_interrupt());
+
 	/*
 	 * When queue is quiesced, we may be switching io scheduler, or
 	 * updating nr_hw_queues, or other things, and we can't run queue
@@ -2261,7 +2248,8 @@ void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
 		return;
 	}
 
-	__blk_mq_run_hw_queue(hctx);
+	blk_mq_run_dispatch_ops(hctx->queue,
+				blk_mq_sched_dispatch_requests(hctx));
 }
 EXPORT_SYMBOL(blk_mq_run_hw_queue);
 
@@ -2429,7 +2417,8 @@ static void blk_mq_run_work_fn(struct work_struct *work)
 	struct blk_mq_hw_ctx *hctx =
 		container_of(work, struct blk_mq_hw_ctx, run_work.work);
 
-	__blk_mq_run_hw_queue(hctx);
+	blk_mq_run_dispatch_ops(hctx->queue,
+				blk_mq_sched_dispatch_requests(hctx));
 }
 
 /**
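For context, blk_mq_run_dispatch_ops() is the macro (defined in block/blk-mq.h) that the open-coded callers now invoke directly; it wraps the dispatch expression in RCU or SRCU protection depending on whether the tag set is marked BLK_MQ_F_BLOCKING. The following is a simplified sketch of that idea only, not the exact upstream macro.

/*
 * Simplified illustration of the protection blk_mq_run_dispatch_ops()
 * provides around blk_mq_sched_dispatch_requests(); the real macro in
 * block/blk-mq.h has additional details (might_sleep checks, a shared
 * __blk_mq_run_dispatch_ops() helper level, etc.).
 */
#define sketch_run_dispatch_ops(q, dispatch_ops)			\
do {									\
	if ((q)->tag_set->flags & BLK_MQ_F_BLOCKING) {			\
		/* Blocking drivers: SRCU, so ->queue_rq() may sleep. */\
		int srcu_idx = srcu_read_lock((q)->tag_set->srcu);	\
									\
		(dispatch_ops);						\
		srcu_read_unlock((q)->tag_set->srcu, srcu_idx);		\
	} else {							\
		/* Non-blocking drivers: plain RCU read-side section. */\
		rcu_read_lock();					\
		(dispatch_ops);						\
		rcu_read_unlock();					\
	}								\
} while (0)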