Mirror of https://github.com/edk2-porting/linux-next.git (synced 2024-12-25 21:54:06 +08:00)
Merge branch 'for-linus' of git://git.kernel.dk/linux-block
Pull final block fixes from Jens Axboe:
"This week and last we've been fixing some corner cases related to
blk-mq, mostly. I ended up pulling most of that out of for-linus
yesterday, which is why the branch looks fresh. The rest were
postponed for 3.18.
This pull request contains:
- Fix from Christoph, avoiding a stack overflow when FUA insertion
would recurse infinitely.
- Fix from David Hildenbrand on races between the timeout handler and
uninitialized requests. Fixes a real issue that virtio_blk has run
into.
- A few fixes from me:
- Ensure that request deadline/timeout is ordered before the
request is marked as started (see the ordering sketch after
the commit list below).
- A potential oops on out-of-memory, when we scale the queue
depth of the device and retry.
- A hang fix on requeue from SCSI, where the hardware queue
would be stopped when we attempt to re-run it (and hence
nothing would happen, stalling progress).
- A fix for commit 2da78092, where the cleanup path was moved
to RCU, but a debug might_sleep() was inadvertently left in
the code. This causes warnings for people"
* 'for-linus' of git://git.kernel.dk/linux-block:
genhd: fix leftover might_sleep() in blk_free_devt()
blk-mq: use blk_mq_start_hw_queues() when running requeue work
blk-mq: fix potential oops on out-of-memory in __blk_mq_alloc_rq_maps()
blk-mq: avoid infinite recursion with the FUA flag
blk-mq: Avoid race condition with uninitialized requests
blk-mq: request deadline must be visible before marking rq as started
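
A side note on the ordering fix referenced above: the pattern is the usual publish-then-set-flag ordering. Below is a minimal standalone sketch of the idea in C11 atomics; all names here are hypothetical, and the actual fix (in the blk_mq_start_request() hunk further down) uses smp_mb__before_atomic() between writing ->deadline and setting the started bit.

/* Standalone sketch, hypothetical names: publish ->deadline before the
 * started flag so a concurrent timeout check never reads a stale value. */
#include <stdatomic.h>
#include <stdbool.h>

struct req {
	unsigned long deadline;		/* written by the submit path */
	atomic_bool started;		/* checked by the timeout handler */
};

static void start_request(struct req *rq, unsigned long deadline)
{
	rq->deadline = deadline;
	/* Release: ->deadline is visible once started reads true. */
	atomic_store_explicit(&rq->started, true, memory_order_release);
}

static bool timed_out(struct req *rq, unsigned long now)
{
	/* Acquire pairs with the release store in start_request(). */
	if (!atomic_load_explicit(&rq->started, memory_order_acquire))
		return false;
	return now > rq->deadline;
}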
This commit is contained in: commit 31f9bf46a5
diff --git a/block/blk-exec.c b/block/blk-exec.c
@@ -56,6 +56,7 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
 	bool is_pm_resume;
 
 	WARN_ON(irqs_disabled());
+	WARN_ON(rq->cmd_type == REQ_TYPE_FS);
 
 	rq->rq_disk = bd_disk;
 	rq->end_io = done;
diff --git a/block/blk-mq.c b/block/blk-mq.c
@@ -203,7 +203,6 @@ __blk_mq_alloc_request(struct blk_mq_alloc_data *data, int rw)
 	if (tag != BLK_MQ_TAG_FAIL) {
 		rq = data->hctx->tags->rqs[tag];
 
-		rq->cmd_flags = 0;
 		if (blk_mq_tag_busy(data->hctx)) {
 			rq->cmd_flags = REQ_MQ_INFLIGHT;
 			atomic_inc(&data->hctx->nr_active);
@@ -258,6 +257,7 @@ static void __blk_mq_free_request(struct blk_mq_hw_ctx *hctx,
 
 	if (rq->cmd_flags & REQ_MQ_INFLIGHT)
 		atomic_dec(&hctx->nr_active);
+	rq->cmd_flags = 0;
 
 	clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
 	blk_mq_put_tag(hctx, tag, &ctx->last_tag);
@@ -392,6 +392,12 @@ static void blk_mq_start_request(struct request *rq, bool last)
 	blk_add_timer(rq);
 
+	/*
+	 * Ensure that ->deadline is visible before set the started
+	 * flag and clear the completed flag.
+	 */
+	smp_mb__before_atomic();
+
 	/*
 	 * Mark us as started and clear complete. Complete might have been
 	 * set if requeue raced with timeout, which then marked it as
@@ -473,7 +479,11 @@ static void blk_mq_requeue_work(struct work_struct *work)
 		blk_mq_insert_request(rq, false, false, false);
 	}
 
-	blk_mq_run_queues(q, false);
+	/*
+	 * Use the start variant of queue running here, so that running
+	 * the requeue work will kick stopped queues.
+	 */
+	blk_mq_start_hw_queues(q);
 }
 
 void blk_mq_add_to_requeue_list(struct request *rq, bool at_head)
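
Why the start variant matters here: running a stopped hardware queue is a no-op, so requests requeued by a driver (SCSI in the reported case) that had stopped the queue would never be dispatched. A toy model with hypothetical names, not the blk-mq API itself:

/* Toy model, hypothetical names: "run" stalls on a stopped queue,
 * "start" clears the stopped state first and makes progress. */
#include <stdbool.h>

struct hw_queue {
	bool stopped;
};

static void dispatch(struct hw_queue *hctx) { (void)hctx; /* drive hw */ }

static void run_queue(struct hw_queue *hctx)
{
	if (hctx->stopped)
		return;			/* no-op: this was the hang */
	dispatch(hctx);
}

static void start_queue(struct hw_queue *hctx)
{
	hctx->stopped = false;		/* kick stopped queues first... */
	dispatch(hctx);			/* ...then dispatch as usual */
}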
@@ -957,14 +967,9 @@ void blk_mq_insert_request(struct request *rq, bool at_head, bool run_queue,
 
 	hctx = q->mq_ops->map_queue(q, ctx->cpu);
 
-	if (rq->cmd_flags & (REQ_FLUSH | REQ_FUA) &&
-	    !(rq->cmd_flags & (REQ_FLUSH_SEQ))) {
-		blk_insert_flush(rq);
-	} else {
-		spin_lock(&ctx->lock);
-		__blk_mq_insert_request(hctx, rq, at_head);
-		spin_unlock(&ctx->lock);
-	}
+	spin_lock(&ctx->lock);
+	__blk_mq_insert_request(hctx, rq, at_head);
+	spin_unlock(&ctx->lock);
 
 	if (run_queue)
 		blk_mq_run_hw_queue(hctx, async);
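
For context on the hunk above, per the pull message: requests reaching this insert path have already been through the flush machinery, so handing flush/FUA requests back to blk_insert_flush() from here could bounce the same request between the two paths until the stack overflows. A toy model of that kind of mutual recursion, with hypothetical names:

/* Toy model, hypothetical names: if insert() hands a FUA request to the
 * flush machinery and the flush machinery re-inserts it, each round trip
 * re-enters insert() and the stack grows without bound. */
#define FLUSH		(1u << 0)
#define FUA		(1u << 1)
#define FLUSH_SEQ	(1u << 2)

struct request { unsigned int flags; };

static void insert_flush(struct request *rq);

static void insert(struct request *rq)
{
	if ((rq->flags & (FLUSH | FUA)) && !(rq->flags & FLUSH_SEQ))
		insert_flush(rq);	/* may come straight back here */
	else
		/* queue the request normally */ ;
}

static void insert_flush(struct request *rq)
{
	/* ...sequencing logic that ends up calling... */
	insert(rq);			/* unbounded mutual recursion */
}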
@@ -1404,6 +1409,8 @@ static struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set,
 		left -= to_do * rq_size;
 		for (j = 0; j < to_do; j++) {
 			tags->rqs[i] = p;
+			tags->rqs[i]->atomic_flags = 0;
+			tags->rqs[i]->cmd_flags = 0;
 			if (set->ops->init_request) {
 				if (set->ops->init_request(set->driver_data,
 					tags->rqs[i], hctx_idx, i,
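
Taken together with the -203/-258 hunks above, this moves flag initialization out of the allocation path: the timeout handler can examine any entry of tags->rqs[], including requests that were never started or are being allocated concurrently, so the flags must be sane from the moment the map is built and must be cleared again on free. A toy model of such a scan, with hypothetical names:

/* Toy model, hypothetical names: a timeout scan walks every tag slot,
 * whether or not the request was ever handed out, so the flags in each
 * slot must never hold garbage. */
#define STARTED	(1ul << 0)

struct request {
	unsigned long atomic_flags;	/* zeroed when the map is built */
	unsigned int cmd_flags;		/* cleared again on free */
	unsigned long deadline;
};

static void timeout_scan(struct request **rqs, int depth, unsigned long now)
{
	for (int i = 0; i < depth; i++) {
		struct request *rq = rqs[i];

		/* Safe only because atomic_flags is pre-initialized. */
		if (!(rq->atomic_flags & STARTED))
			continue;
		if (now > rq->deadline)
			/* handle_timeout(rq) */ ;
	}
}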
@@ -1956,7 +1963,6 @@ out_unwind:
 	while (--i >= 0)
 		blk_mq_free_rq_map(set, set->tags[i], i);
 
-	set->tags = NULL;
 	return -ENOMEM;
 }
 
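
Context for the deleted line, per the pull message's out-of-memory bullet: the tags array is allocated by the caller, which scales the queue depth down and retries on allocation failure, so the unwind path must leave set->tags intact for the next attempt. A toy model of that retry loop, with hypothetical names (the stub stands in for the real allocator):

/* Toy model, hypothetical names: the caller owns set->tags and retries
 * with half the depth on failure, which is why the unwind path above
 * must not NULL the pointer. */
struct tag_set {
	void **tags;			/* array allocated once by the caller */
	unsigned int queue_depth;
};

static int __alloc_rq_maps(struct tag_set *set)
{
	(void)set;
	return -1;			/* stub: pretend allocation failed */
}

static int alloc_rq_maps(struct tag_set *set)
{
	do {
		if (__alloc_rq_maps(set) == 0)	/* fills set->tags[i] */
			return 0;
		/* Out of memory: scale down and retry. The retry
		 * dereferences set->tags, so it must still be valid. */
		set->queue_depth >>= 1;
	} while (set->queue_depth >= 4);

	return -1;			/* stands in for -ENOMEM */
}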
diff --git a/block/genhd.c b/block/genhd.c
@@ -445,8 +445,6 @@ int blk_alloc_devt(struct hd_struct *part, dev_t *devt)
  */
 void blk_free_devt(dev_t devt)
 {
-	might_sleep();
-
 	if (devt == MKDEV(0, 0))
 		return;
 