block-5.14-2021-08-20
-----BEGIN PGP SIGNATURE-----

iQJEBAABCAAuFiEEwPw5LcreJtl1+l5K99NY+ylx4KYFAmEgaYcQHGF4Ym9lQGtl
cm5lbC5kawAKCRD301j7KXHgppcLD/963Ld4YWLi1Chq6e2iqnUmuxPIgQbWCCD+
RKNf3PRLwjVLsBKLjKHhp9fB9XhpmlXkxkLcYech9D2lFpnVf1tc0ziGCIpsNuGG
X2UO8jfE6/XJ+laCyjTkoMlj2zWBJXSwwKx6JyDDOobYLiuVDUHeAcGVvLY/itvx
tEtd+lXmz7cE41Q4cdoeJdmSOE54BAP5uCO66La5bv2r7xQN/nPWi+yg5UTV3GJB
JuL+8RHyV3d4eiBF9Jg0izdp9vaUxUD3VmOjILmaG2wQy+Pbve9mMCZtTFMvSBcR
Vw9B/fbNVon7YqOsrSCdIsfW066MqnIj55nRRETN6LxTGuzx6lQpJPSRXSDGKkR5
SSckLXPKUcRPaX4Lc/SvgQpzvxhY3b9z3BRrIlxy8DWcZT7qq/bb41O9J4z6+jUn
XIjKzvADLGqUqS/5zowyk/3vFHGnyhjYsRqMmpLCbjjxi5fSBbR+yorm5Vlx8auJ
7iWHuNCGyUY/rMB1pibYhvT1dnNR6qOm/jTdHwjsb/QPDuCoU06TFnXbuSoefJlf
ijfuwKQLgxkikICLHQ0uUHSuGhz1A8CwAZjz4rmTBSiFgQeM09v/pf4r+ymxrSgU
n+yb4DAECIsyK8he3ePahFeJgsb0JMmz3ciSJJkK3im69jdLd28Xp4Vs0tgKmz8e
2hMHgkXFSQ==
=DGKg
-----END PGP SIGNATURE-----

Merge tag 'block-5.14-2021-08-20' of git://git.kernel.dk/linux-block

Pull block fixes from Jens Axboe:
 "Three fixes from Ming Lei that should go into 5.14:

   - Fix for a kernel panic when iterating over tags for some cases
     where a flush request is present, a regression in this cycle.

   - Request timeout fix

   - Fix flush request checking"

* tag 'block-5.14-2021-08-20' of git://git.kernel.dk/linux-block:
  blk-mq: fix is_flush_rq
  blk-mq: fix kernel panic during iterating over flush request
  blk-mq: don't grab rq's refcount in blk_mq_check_expired()
block/blk-core.c

@@ -122,7 +122,6 @@ void blk_rq_init(struct request_queue *q, struct request *rq)
         rq->internal_tag = BLK_MQ_NO_TAG;
         rq->start_time_ns = ktime_get_ns();
         rq->part = NULL;
-        refcount_set(&rq->ref, 1);
         blk_crypto_rq_set_defaults(rq);
 }
 EXPORT_SYMBOL(blk_rq_init);
block/blk-flush.c

@@ -262,6 +262,11 @@ static void flush_end_io(struct request *flush_rq, blk_status_t error)
         spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
 }
 
+bool is_flush_rq(struct request *rq)
+{
+        return rq->end_io == flush_end_io;
+}
+
 /**
  * blk_kick_flush - consider issuing flush request
  * @q: request_queue being kicked
@@ -329,6 +334,14 @@ static void blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq,
         flush_rq->rq_flags |= RQF_FLUSH_SEQ;
         flush_rq->rq_disk = first_rq->rq_disk;
         flush_rq->end_io = flush_end_io;
+        /*
+         * Order WRITE ->end_io and WRITE rq->ref, and its pair is the one
+         * implied in refcount_inc_not_zero() called from
+         * blk_mq_find_and_get_req(), which orders WRITE/READ flush_rq->ref
+         * and READ flush_rq->end_io
+         */
+        smp_wmb();
+        refcount_set(&flush_rq->ref, 1);
 
         blk_flush_queue_rq(flush_rq, false);
 }
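The new comment and barrier in blk_kick_flush() encode the ordering this series relies on, and they are also why refcount_set(&rq->ref, 1) was dropped from blk_rq_init() in the first hunk: the reused flush request must only become live (ref != 0) after its ->end_io has been published, so that a concurrent tag iterator that wins refcount_inc_not_zero() is guaranteed to see the current ->end_io. Below is a minimal userspace model of that publish/acquire pairing using C11 atomics; the types and names are invented for illustration, and the release store stands in for smp_wmb() plus refcount_set() -- this is a sketch of the pattern, not blk-mq code.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Invented stand-ins for the kernel structures involved. */
typedef void (*end_io_fn)(void);

struct fake_rq {
        _Atomic(end_io_fn) end_io;
        atomic_uint ref;                /* 0 == not live */
};

static void fake_flush_end_io(void) { }

/*
 * Writer, modeled on blk_kick_flush(): publish ->end_io first, then
 * re-arm the reference count.  The release store plays the role of
 * smp_wmb() + refcount_set(&flush_rq->ref, 1).
 */
static void publish_flush_rq(struct fake_rq *rq)
{
        atomic_store_explicit(&rq->end_io, fake_flush_end_io,
                              memory_order_relaxed);
        atomic_store_explicit(&rq->ref, 1, memory_order_release);
}

/*
 * Reader, modeled on blk_mq_find_and_get_req(): only look at the request
 * after winning an inc-not-zero; the acquire pairs with the release above,
 * so a successful increment guarantees we observe the published ->end_io.
 */
static bool get_and_check_flush(struct fake_rq *rq)
{
        unsigned int old = atomic_load_explicit(&rq->ref, memory_order_acquire);

        while (old != 0) {
                if (atomic_compare_exchange_weak_explicit(&rq->ref, &old, old + 1,
                                                          memory_order_acquire,
                                                          memory_order_acquire))
                        return atomic_load_explicit(&rq->end_io,
                                                    memory_order_relaxed)
                                == fake_flush_end_io;
        }
        return false;   /* ref was zero: request is not live, leave it alone */
}

int main(void)
{
        struct fake_rq rq = { .end_io = NULL, .ref = 0 };

        publish_flush_rq(&rq);
        printf("saw flush end_io: %d\n", get_and_check_flush(&rq));
        return 0;
}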
block/blk-mq.c

@@ -911,7 +911,7 @@ static bool blk_mq_req_expired(struct request *rq, unsigned long *next)
 
 void blk_mq_put_rq_ref(struct request *rq)
 {
-        if (is_flush_rq(rq, rq->mq_hctx))
+        if (is_flush_rq(rq))
                 rq->end_io(rq, 0);
         else if (refcount_dec_and_test(&rq->ref))
                 __blk_mq_free_request(rq);
@@ -923,34 +923,14 @@ static bool blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
         unsigned long *next = priv;
 
         /*
-         * Just do a quick check if it is expired before locking the request in
-         * so we're not unnecessarilly synchronizing across CPUs.
-         */
-        if (!blk_mq_req_expired(rq, next))
-                return true;
-
-        /*
-         * We have reason to believe the request may be expired. Take a
-         * reference on the request to lock this request lifetime into its
-         * currently allocated context to prevent it from being reallocated in
-         * the event the completion by-passes this timeout handler.
-         *
-         * If the reference was already released, then the driver beat the
-         * timeout handler to posting a natural completion.
-         */
-        if (!refcount_inc_not_zero(&rq->ref))
-                return true;
-
-        /*
-         * The request is now locked and cannot be reallocated underneath the
-         * timeout handler's processing. Re-verify this exact request is truly
-         * expired; if it is not expired, then the request was completed and
-         * reallocated as a new request.
+         * blk_mq_queue_tag_busy_iter() has locked the request, so it cannot
+         * be reallocated underneath the timeout handler's processing, then
+         * the expire check is reliable. If the request is not expired, then
+         * it was completed and reallocated as a new request after returning
+         * from blk_mq_check_expired().
          */
         if (blk_mq_req_expired(rq, next))
                 blk_mq_rq_timed_out(rq, reserved);
-
-        blk_mq_put_rq_ref(rq);
         return true;
 }
 
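After this change blk_mq_check_expired() no longer takes its own reference; it relies on the iterator (blk_mq_queue_tag_busy_iter() and friends) having pinned the request via refcount_inc_not_zero() before invoking the callback and putting it afterwards with blk_mq_put_rq_ref(). A rough single-threaded sketch of that ownership pattern follows; the names are invented and this is a model of the idea, not the real blk-mq API.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Invented types for the "iterator owns the reference" pattern. */
struct mock_rq {
        unsigned int ref;       /* 0 == freed / not live */
        bool expired;
};

typedef bool (*busy_iter_fn)(struct mock_rq *rq);

/* Callback in the spirit of the new blk_mq_check_expired(): it may assume
 * the caller already pinned the request, so it just checks and acts. */
static bool check_expired(struct mock_rq *rq)
{
        if (rq->expired)
                printf("timing out rq %p\n", (void *)rq);
        return true;                    /* keep iterating */
}

/* Iterator in the spirit of blk_mq_queue_tag_busy_iter()/bt_iter():
 * pin the request, run the callback, then drop the reference. */
static void busy_iter(struct mock_rq **rqs, size_t nr, busy_iter_fn fn)
{
        for (size_t i = 0; i < nr; i++) {
                struct mock_rq *rq = rqs[i];
                bool keep_going;

                if (rq == NULL || rq->ref == 0)
                        continue;       /* inc-not-zero would fail: skip */
                rq->ref++;              /* pin for the duration of the callback */
                keep_going = fn(rq);
                rq->ref--;              /* the kernel does blk_mq_put_rq_ref() here */
                if (!keep_going)
                        break;
        }
}

int main(void)
{
        struct mock_rq live  = { .ref = 1, .expired = true };
        struct mock_rq freed = { .ref = 0, .expired = true };
        struct mock_rq *tags[] = { &live, &freed, NULL };

        busy_iter(tags, 3, check_expired);
        return 0;
}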
block/blk.h

@@ -44,11 +44,7 @@ static inline void __blk_get_queue(struct request_queue *q)
         kobject_get(&q->kobj);
 }
 
-static inline bool
-is_flush_rq(struct request *req, struct blk_mq_hw_ctx *hctx)
-{
-        return hctx->fq->flush_rq == req;
-}
+bool is_flush_rq(struct request *req);
 
 struct blk_flush_queue *blk_alloc_flush_queue(int node, int cmd_size,
                                               gfp_t flags);
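For context on the "fix is_flush_rq" part of the pull: the old helper needed a hw queue to compare against hctx->fq->flush_rq, but on some tag-iteration paths the hctx handed to it can be NULL, so the comparison itself could fault; the new helper decides from the request alone by looking at ->end_io. A toy userspace model of the difference is sketched below, with invented names rather than the real kernel structures.

#include <stddef.h>
#include <stdio.h>

/* Toy model; only meant to show why the check moved. */
struct toy_rq;
typedef void (*toy_end_io)(struct toy_rq *);

struct toy_fq   { struct toy_rq *flush_rq; };
struct toy_hctx { struct toy_fq *fq; };
struct toy_rq   {
        struct toy_hctx *mq_hctx;       /* can be NULL on some paths */
        toy_end_io end_io;
};

static void toy_flush_end_io(struct toy_rq *rq) { (void)rq; }

/* Old-style check: needs a valid hctx to compare against. */
static int is_flush_rq_old(struct toy_rq *rq, struct toy_hctx *hctx)
{
        return hctx->fq->flush_rq == rq;        /* faults if hctx is NULL */
}

/* New-style check: decided from the request alone. */
static int is_flush_rq_new(struct toy_rq *rq)
{
        return rq->end_io == toy_flush_end_io;
}

int main(void)
{
        struct toy_rq flush = { .mq_hctx = NULL, .end_io = toy_flush_end_io };

        printf("new check: %d\n", is_flush_rq_new(&flush));
        /* is_flush_rq_old(&flush, flush.mq_hctx) would dereference NULL */
        (void)is_flush_rq_old;
        return 0;
}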