block-5.17-2022-02-11

-----BEGIN PGP SIGNATURE-----

iQJEBAABCAAuFiEEwPw5LcreJtl1+l5K99NY+ylx4KYFAmIGZfEQHGF4Ym9lQGtl
cm5lbC5kawAKCRD301j7KXHgpiVlEACiRNthLM/OvfzsPfAMr4o6ieEuepSUazqY
WK4l4O9UH4RR60wfFaPngJsjIsNVcg50qf1jJ6iRqCPPx1xpPKwk7aKs5ckze0Z2
WHnc29pp/Pn2GBPLY+WN3OBIK4qaMu6GSOyqs8WPWmh9Ypgz5tV3pf3WyzjllQ7e
k3rJ/Zs6WpFLFiCNqXJ36kBmJflF75fOc/0a8eScfI5OXkseR48Gya8kkhweylKs
zyFLWeB905U5FVNgytW6NR42vXtRE8YfXVmlXgqZYu5nB3maUhL06Ex/9kFg/I4k
agRA4kWOJAbXXfhU0nmfLteqr6wgHZq+BtuVS45BhmsfUdiAa4MrLDR491Mc0Eog
F601kQXh/PmVauetF3mRbefIgNgfj39ZKXKOxvEDU2z3bCr6nnEFm29vMR46ae+k
ND7ajfyJ2eWT7VnRpES4Ojz1GpoIHIs5mBJzFiATiVmXgymK/b/LA1m4m2vXabvS
qWDW66EDdQnseK5ogALph+C6PL6d7iGLu94GPTBlq2Of64B0NYY4ne1Fzwce9nVn
3f3z7xHIZcv9yXzy9EufDEA/RnryFCkzM1s2K/2mF2GFuVFa4LCZfj4F+q06d/Bv
NQHUf/T7nqNiG56whsKOXBKw2iE6KrHU7r5COKU3oEvzms9yqvkp5SaC3g/3kSuM
eaZIVIZvjw==
=tytd
-----END PGP SIGNATURE-----

Merge tag 'block-5.17-2022-02-11' of git://git.kernel.dk/linux-block

Pull block fixes from Jens Axboe:

 - NVMe pull request
     - nvme-tcp: fix bogus request completion when failing to send AER
       (Sagi Grimberg)
     - add the missing nvme_complete_req tracepoint for batched
       completion (Bean Huo)

 - Revert of the loop async autoclear change that has continued to
   plague us this release. A few patchsets exist to improve this, but
   they are too invasive to be considered at this point (Tetsuo)

* tag 'block-5.17-2022-02-11' of git://git.kernel.dk/linux-block:
  loop: revert "make autoclear operation asynchronous"
  nvme-tcp: fix bogus request completion when failing to send AER
  nvme: add nvme_complete_req tracepoint for batched completion
This commit is contained in: commit cf26a2360a
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
@@ -1082,7 +1082,7 @@ out_putf:
        return error;
 }
 
-static void __loop_clr_fd(struct loop_device *lo)
+static void __loop_clr_fd(struct loop_device *lo, bool release)
 {
        struct file *filp;
        gfp_t gfp = lo->old_gfp_mask;
@@ -1144,6 +1144,8 @@ static void __loop_clr_fd(struct loop_device *lo)
        /* let user-space know about this change */
        kobject_uevent(&disk_to_dev(lo->lo_disk)->kobj, KOBJ_CHANGE);
        mapping_set_gfp_mask(filp->f_mapping, gfp);
+       /* This is safe: open() is still holding a reference. */
+       module_put(THIS_MODULE);
        blk_mq_unfreeze_queue(lo->lo_queue);
 
        disk_force_media_change(lo->lo_disk, DISK_EVENT_MEDIA_CHANGE);
@@ -1151,52 +1153,44 @@ static void __loop_clr_fd(struct loop_device *lo)
        if (lo->lo_flags & LO_FLAGS_PARTSCAN) {
                int err;
 
-               mutex_lock(&lo->lo_disk->open_mutex);
+               /*
+                * open_mutex has been held already in release path, so don't
+                * acquire it if this function is called in such case.
+                *
+                * If the reread partition isn't from release path, lo_refcnt
+                * must be at least one and it can only become zero when the
+                * current holder is released.
+                */
+               if (!release)
+                       mutex_lock(&lo->lo_disk->open_mutex);
                err = bdev_disk_changed(lo->lo_disk, false);
-               mutex_unlock(&lo->lo_disk->open_mutex);
+               if (!release)
+                       mutex_unlock(&lo->lo_disk->open_mutex);
                if (err)
                        pr_warn("%s: partition scan of loop%d failed (rc=%d)\n",
                                __func__, lo->lo_number, err);
                /* Device is gone, no point in returning error */
        }
 
+       /*
+        * lo->lo_state is set to Lo_unbound here after above partscan has
+        * finished. There cannot be anybody else entering __loop_clr_fd() as
+        * Lo_rundown state protects us from all the other places trying to
+        * change the 'lo' device.
+        */
        lo->lo_flags = 0;
        if (!part_shift)
                lo->lo_disk->flags |= GENHD_FL_NO_PART;
-
-       fput(filp);
-}
-
-static void loop_rundown_completed(struct loop_device *lo)
-{
        mutex_lock(&lo->lo_mutex);
        lo->lo_state = Lo_unbound;
        mutex_unlock(&lo->lo_mutex);
-       module_put(THIS_MODULE);
-}
-
-static void loop_rundown_workfn(struct work_struct *work)
-{
-       struct loop_device *lo = container_of(work, struct loop_device,
-                                             rundown_work);
-       struct block_device *bdev = lo->lo_device;
-       struct gendisk *disk = lo->lo_disk;
-
-       __loop_clr_fd(lo);
-       kobject_put(&bdev->bd_device.kobj);
-       module_put(disk->fops->owner);
-       loop_rundown_completed(lo);
-}
-
-static void loop_schedule_rundown(struct loop_device *lo)
-{
-       struct block_device *bdev = lo->lo_device;
-       struct gendisk *disk = lo->lo_disk;
-
-       __module_get(disk->fops->owner);
-       kobject_get(&bdev->bd_device.kobj);
-       INIT_WORK(&lo->rundown_work, loop_rundown_workfn);
-       queue_work(system_long_wq, &lo->rundown_work);
+
+       /*
+        * Need not hold lo_mutex to fput backing file. Calling fput holding
+        * lo_mutex triggers a circular lock dependency possibility warning as
+        * fput can take open_mutex which is usually taken before lo_mutex.
+        */
+       fput(filp);
 }
 
 static int loop_clr_fd(struct loop_device *lo)
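The "Need not hold lo_mutex to fput" comment restored above describes a lock-order inversion: fput() can take open_mutex, while the rest of the driver takes open_mutex before lo_mutex. A minimal userspace analogy of that AB-BA pattern (pthread mutexes with illustrative names, not the kernel's locks; in the kernel, lockdep reports this):

/*
 * Each order is harmless on its own; a lock-order checker (lockdep in
 * the kernel, e.g. helgrind for this pthread sketch) complains because
 * the two orders could interleave across tasks and deadlock.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t open_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t lo_mutex = PTHREAD_MUTEX_INITIALIZER;

static void normal_path(void)           /* open_mutex -> lo_mutex */
{
        pthread_mutex_lock(&open_mutex);
        pthread_mutex_lock(&lo_mutex);
        pthread_mutex_unlock(&lo_mutex);
        pthread_mutex_unlock(&open_mutex);
}

static void fput_under_lo_mutex(void)   /* lo_mutex -> open_mutex: inverted */
{
        pthread_mutex_lock(&lo_mutex);
        pthread_mutex_lock(&open_mutex); /* a checker reports AB-BA here */
        pthread_mutex_unlock(&open_mutex);
        pthread_mutex_unlock(&lo_mutex);
}

int main(void)
{
        normal_path();
        fput_under_lo_mutex();
        puts("both orders taken; run under a lock-order checker to see the report");
        return 0;
}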
@@ -1228,8 +1222,7 @@ static int loop_clr_fd(struct loop_device *lo)
        lo->lo_state = Lo_rundown;
        mutex_unlock(&lo->lo_mutex);
 
-       __loop_clr_fd(lo);
-       loop_rundown_completed(lo);
+       __loop_clr_fd(lo, false);
        return 0;
 }
 
@@ -1754,7 +1747,7 @@ static void lo_release(struct gendisk *disk, fmode_t mode)
                 * In autoclear mode, stop the loop thread
                 * and remove configuration after last close.
                 */
-               loop_schedule_rundown(lo);
+               __loop_clr_fd(lo, true);
                return;
        } else if (lo->lo_state == Lo_bound) {
                /*
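For context on the autoclear path that lo_release() services above: userspace arms it with LOOP_SET_STATUS64 and LO_FLAGS_AUTOCLEAR, and the last close of the device then tears down the binding, with this revert synchronously via __loop_clr_fd(lo, true). A minimal sketch, assuming /dev/loop0 is free and backing.img exists (real code would ask /dev/loop-control for a free device with LOOP_CTL_GET_FREE):

#include <fcntl.h>
#include <linux/loop.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
        int loopfd = open("/dev/loop0", O_RDWR);
        int backing = open("backing.img", O_RDWR);
        struct loop_info64 info;

        if (loopfd < 0 || backing < 0)
                return 1;
        if (ioctl(loopfd, LOOP_SET_FD, backing) < 0)  /* bind backing file */
                return 1;

        memset(&info, 0, sizeof(info));
        info.lo_flags = LO_FLAGS_AUTOCLEAR;           /* detach on last close */
        if (ioctl(loopfd, LOOP_SET_STATUS64, &info) < 0)
                return 1;

        /*
         * Closing the last reference triggers lo_release(); with this fix
         * the device is cleared synchronously instead of being deferred
         * to a workqueue.
         */
        close(loopfd);
        close(backing);
        return 0;
}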
diff --git a/drivers/block/loop.h b/drivers/block/loop.h
@@ -56,7 +56,6 @@ struct loop_device {
        struct gendisk          *lo_disk;
        struct mutex            lo_mutex;
        bool                    idr_visible;
-       struct work_struct      rundown_work;
 };
 
 struct loop_cmd {
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
@@ -368,6 +368,7 @@ EXPORT_SYMBOL_GPL(nvme_complete_rq);
 
 void nvme_complete_batch_req(struct request *req)
 {
+       trace_nvme_complete_rq(req);
        nvme_cleanup_cmd(req);
        nvme_end_req_zoned(req);
 }
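For context, batched completions reach nvme_complete_batch_req() through an inline helper in drivers/nvme/host/nvme.h. Sketched from the 5.17-era source (abridged, shown only to situate the hunk above), it runs the driver's per-request cleanup and then ends the whole batch, so the added trace_nvme_complete_rq() call restores tracepoint parity with the non-batched nvme_complete_rq() path:

/* Sketch of the batching helper (5.17 era, abridged): @fn is the
 * driver's per-request cleanup; every request in the batch now hits
 * the nvme_complete_rq tracepoint via nvme_complete_batch_req(). */
static __always_inline void nvme_complete_batch(struct io_comp_batch *iob,
                                                void (*fn)(struct request *rq))
{
        struct request *req;

        rq_list_for_each(&iob->req_list, req) {
                fn(req);
                nvme_complete_batch_req(req);
        }
        blk_mq_end_request_batch(iob);
}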
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
@@ -913,7 +913,15 @@ static inline void nvme_tcp_done_send_req(struct nvme_tcp_queue *queue)
 
 static void nvme_tcp_fail_request(struct nvme_tcp_request *req)
 {
-       nvme_tcp_end_request(blk_mq_rq_from_pdu(req), NVME_SC_HOST_PATH_ERROR);
+       if (nvme_tcp_async_req(req)) {
+               union nvme_result res = {};
+
+               nvme_complete_async_event(&req->queue->ctrl->ctrl,
+                               cpu_to_le16(NVME_SC_HOST_PATH_ERROR), &res);
+       } else {
+               nvme_tcp_end_request(blk_mq_rq_from_pdu(req),
+                               NVME_SC_HOST_PATH_ERROR);
+       }
 }
 
 static int nvme_tcp_try_send_data(struct nvme_tcp_request *req)
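The bug being fixed: the asynchronous event request (AER) is not backed by a real block-layer request, so failing it through nvme_tcp_end_request()/blk_mq completed a bogus request; it must instead be completed as an async event, as the hunk above does. The nvme_tcp_async_req() helper it relies on is roughly the following (sketched from the same file, for context: an async request is recognized by being the controller's dedicated AER slot rather than a tagset request):

static inline bool nvme_tcp_async_req(struct nvme_tcp_request *req)
{
        /* the controller embeds exactly one request for AERs */
        return req == &req->queue->ctrl->async_req;
}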