Mirror of https://github.com/edk2-porting/linux-next.git, synced 2024-12-18 02:04:05 +08:00
for-linus-20191101
-----BEGIN PGP SIGNATURE-----

iQJEBAABCAAuFiEEwPw5LcreJtl1+l5K99NY+ylx4KYFAl28lRQQHGF4Ym9lQGtl
cm5lbC5kawAKCRD301j7KXHgpjtbEADbrMXdsdPV2CApxSiZIaWO1mR78yy/btxp
cHsJ+avaPGxhNukSsose2KWm656SriH/OfQspqtvzDpslbu40V41+vSqSknqGRPr
8jW5efZIAy6dq0FjbtBnmIV6PhC5d4F/nAEQbsnVRn8RSr3OwQcm8/smpSFA8urI
oHVU8jiyLsQiSbDvjf2KPhPYhWBHO0W5SyGo29HY8pSzQpsMzGkQ6TcHL4EzwPZP
WPtGglr14v8rMyhNMxUdHZ9eHCMq7uufFPuyJXzesE/qyM+H8p2pwwxyfflHGZil
w2vxLJRu8d4UIkHEkNbC0bydXJ+eCtRMBZON1ZGdrZwQ58L9AbBPBZmxKb0LkmHb
4tc/yQm/0kSUUXwFtDoUoIBFjjy36Pl5BsLt4n5fofsl04myhm5CLqZ8oWxyU0vO
sCinJwk1+eQO/tbQVDfven+MroNlYVPCnXhIe/12/wEba3EJ7Ab4X5p0lJoJ1oY7
9dQyY6+BaHd4wV9p0domOP5y7dJnXM9k46EF0/5YoNjoqaH5MWPMq355VH2xNjdw
5HzRcZfvOAlXASrnXuQAAQAdR2b+s/iFZaNKA7bTZxjNPvYE0zySCMeQeNXmfVKe
CrDuwViWukwIzETDZHYqMWJxOV4nyOeL3jTo7rQp5A5TEWwBiJKQ4aGBif2eqc+L
Mk41ziQGuQ==
=+rar
-----END PGP SIGNATURE-----

Merge tag 'for-linus-20191101' of git://git.kernel.dk/linux-block

Pull block fixes from Jens Axboe:

 - Two small nvme fixes, one is a fabrics connection fix, the other one
   a cleanup made possible by that fix (Anton, via Keith)

 - Fix requeue handling in um ubd (Anton)

 - Fix spin_lock_irq() nesting in blk-iocost (Dan)

 - Three small io_uring fixes:
     - Install io_uring fd after done with ctx (me)
     - Clear ->result before every poll issue (me)
     - Fix leak of shadow request on error (Pavel)

* tag 'for-linus-20191101' of git://git.kernel.dk/linux-block:
  iocost: don't nest spin_lock_irq in ioc_weight_write()
  io_uring: ensure we clear io_kiocb->result before each issue
  um-ubd: Entrust re-queue to the upper layers
  nvme-multipath: remove unused groups_only mode in ana log
  nvme-multipath: fix possible io hang after ctrl reconnect
  io_uring: don't touch ctx in setup after ring fd install
  io_uring: Fix leaked shadow_req
Commit 0821de2896
--- a/arch/um/drivers/ubd_kern.c
+++ b/arch/um/drivers/ubd_kern.c
@@ -1403,8 +1403,12 @@ static blk_status_t ubd_queue_rq(struct blk_mq_hw_ctx *hctx,
 
 	spin_unlock_irq(&ubd_dev->lock);
 
-	if (ret < 0)
-		blk_mq_requeue_request(req, true);
+	if (ret < 0) {
+		if (ret == -ENOMEM)
+			res = BLK_STS_RESOURCE;
+		else
+			res = BLK_STS_DEV_RESOURCE;
+	}
 
 	return res;
 }
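The point of this hunk is ownership of requeueing: instead of calling blk_mq_requeue_request() itself, the driver now reports a blk_status_t and lets blk-mq decide when to retry. BLK_STS_RESOURCE signals a host-side shortage that the core retries on its own (possibly after a delay), while BLK_STS_DEV_RESOURCE tells the core the driver will rerun the queue once the device resource frees up. A minimal userspace sketch of the mapping introduced above; the enum values and helper name are stand-ins, not the kernel's blk_status_t encoding:

#include <errno.h>
#include <stdio.h>

enum blk_status { BLK_STS_OK, BLK_STS_RESOURCE, BLK_STS_DEV_RESOURCE };

/*
 * -ENOMEM is a host-side shortage: blk-mq retries on its own. Anything
 * else maps to a device-side shortage, where the driver promises to
 * rerun the queue once resources become available again.
 */
static enum blk_status ubd_ret_to_status(int ret)
{
	if (ret >= 0)
		return BLK_STS_OK;
	return ret == -ENOMEM ? BLK_STS_RESOURCE : BLK_STS_DEV_RESOURCE;
}

int main(void)
{
	printf("ok=%d enomem=%d eio=%d\n", ubd_ret_to_status(0),
	       ubd_ret_to_status(-ENOMEM), ubd_ret_to_status(-EIO));
	return 0;
}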
--- a/block/blk-iocost.c
+++ b/block/blk-iocost.c
@@ -2110,10 +2110,10 @@ static ssize_t ioc_weight_write(struct kernfs_open_file *of, char *buf,
 		goto einval;
 	}
 
-	spin_lock_irq(&iocg->ioc->lock);
+	spin_lock(&iocg->ioc->lock);
 	iocg->cfg_weight = v;
 	weight_updated(iocg);
-	spin_unlock_irq(&iocg->ioc->lock);
+	spin_unlock(&iocg->ioc->lock);
 
 	blkg_conf_finish(&ctx);
 	return nbytes;
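Why this is a fix rather than a cleanup: ioc_weight_write() runs here with interrupts already disabled, because blkg_conf_prep() has taken the queue lock with spin_lock_irq(). Nesting a second spin_lock_irq()/spin_unlock_irq() pair means the inner unlock re-enables interrupts while the outer lock is still held. A compilable userspace analogy of that hazard, with irq state modeled as a plain flag (all names here are illustrative, not kernel API):

#include <stdbool.h>
#include <stdio.h>

static bool irqs_on = true;

static void spin_lock_irq_sim(void)   { irqs_on = false; } /* lock + irqs off */
static void spin_unlock_irq_sim(void) { irqs_on = true; }  /* unlock + irqs ON */
static void spin_lock_sim(void)       { }                  /* irq state untouched */
static void spin_unlock_sim(void)     { }

int main(void)
{
	/* outer: blkg_conf_prep() takes the queue lock with irqs disabled */
	spin_lock_irq_sim();

	/* buggy inner pair: the unlock re-enables irqs mid-critical-section */
	spin_lock_irq_sim();
	spin_unlock_irq_sim();
	printf("after buggy inner unlock: irqs_on=%d (should be 0)\n", irqs_on);

	/* fixed inner pair: plain lock/unlock leaves irq state alone */
	irqs_on = false;
	spin_lock_sim();
	spin_unlock_sim();
	printf("after fixed inner unlock: irqs_on=%d\n", irqs_on);

	spin_unlock_irq_sim();	/* outer unlock is where irqs come back on */
	return 0;
}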
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -522,14 +522,13 @@ static int nvme_update_ana_state(struct nvme_ctrl *ctrl,
 	return 0;
 }
 
-static int nvme_read_ana_log(struct nvme_ctrl *ctrl, bool groups_only)
+static int nvme_read_ana_log(struct nvme_ctrl *ctrl)
 {
 	u32 nr_change_groups = 0;
 	int error;
 
 	mutex_lock(&ctrl->ana_lock);
-	error = nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_ANA,
-			groups_only ? NVME_ANA_LOG_RGO : 0,
+	error = nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_ANA, 0,
 			ctrl->ana_log_buf, ctrl->ana_log_size, 0);
 	if (error) {
 		dev_warn(ctrl->device, "Failed to get ANA log: %d\n", error);
@@ -565,7 +564,7 @@ static void nvme_ana_work(struct work_struct *work)
 {
 	struct nvme_ctrl *ctrl = container_of(work, struct nvme_ctrl, ana_work);
 
-	nvme_read_ana_log(ctrl, false);
+	nvme_read_ana_log(ctrl);
 }
 
 static void nvme_anatt_timeout(struct timer_list *t)
@@ -715,7 +714,7 @@ int nvme_mpath_init(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
 		goto out;
 	}
 
-	error = nvme_read_ana_log(ctrl, true);
+	error = nvme_read_ana_log(ctrl);
 	if (error)
 		goto out_free_ana_log_buf;
 	return 0;
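Background for the two nvme-multipath commits: NVME_ANA_LOG_RGO ("return groups only") asks the controller for ANA group descriptors without each group's namespace list. With the reconnect fix, the init path now reads the full log too, so the groups_only parameter has no remaining caller and is dropped. The buffer nvme_read_ana_log() fills starts with a small fixed header; below is a sketch of that layout per the NVMe spec, parsed in userspace for illustration (the struct name is ours, not the kernel's, and the memcpy parse assumes a little-endian host):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* ANA log page header (Get Log Page 0Ch); fields are little-endian */
struct ana_log_hdr {
	uint64_t chgcnt;	/* change count */
	uint16_t ngrps;		/* number of ANA group descriptors following */
	uint16_t rsvd[3];
};

int main(void)
{
	uint8_t buf[16] = {0};
	struct ana_log_hdr hdr;

	buf[0] = 7;	/* chgcnt = 7 */
	buf[8] = 2;	/* ngrps = 2 */

	memcpy(&hdr, buf, sizeof(hdr));	/* little-endian host assumed */
	printf("chgcnt=%llu ngrps=%u\n",
	       (unsigned long long)hdr.chgcnt, hdr.ngrps);
	return 0;
}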
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1124,6 +1124,7 @@ static int io_prep_rw(struct io_kiocb *req, const struct sqe_submit *s,
 
 		kiocb->ki_flags |= IOCB_HIPRI;
 		kiocb->ki_complete = io_complete_rw_iopoll;
+		req->result = 0;
 	} else {
 		if (kiocb->ki_flags & IOCB_HIPRI)
 			return -EINVAL;
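With IORING_SETUP_IOPOLL, the completion side discovers finished requests by inspecting io_kiocb->result rather than via a completion interrupt, and the same io_kiocb can be issued more than once (for example after being punted and retried). A stale non-zero ->result from an earlier attempt could therefore make the poll loop treat a freshly re-issued request as already complete, which is why the hunk clears it on every issue. A toy userspace model of that hazard (the struct and helpers are stand-ins, not io_uring internals):

#include <stdio.h>

/* stand-in for struct io_kiocb; only the field that matters here */
struct toy_req { int result; };

static void issue(struct toy_req *req, int clear_result)
{
	if (clear_result)
		req->result = 0;	/* what the hunk adds on every issue */
	/* ... submit to the device ... */
}

/* iopoll-style check: a non-zero result means "this one completed" */
static int poll_done(const struct toy_req *req)
{
	return req->result != 0;
}

int main(void)
{
	struct toy_req req = { .result = 0 };

	issue(&req, 0);
	req.result = 4096;	/* first issue completes with 4096 bytes */
	printf("first issue: done=%d\n", poll_done(&req));

	issue(&req, 0);		/* buggy re-issue keeps the stale result */
	printf("buggy retry: done=%d (false completion!)\n", poll_done(&req));

	issue(&req, 1);		/* fixed re-issue clears ->result first */
	printf("fixed retry: done=%d\n", poll_done(&req));
	return 0;
}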
@@ -2413,6 +2414,7 @@ static int io_queue_link_head(struct io_ring_ctx *ctx, struct io_kiocb *req,
 	if (ret) {
 		if (ret != -EIOCBQUEUED) {
 			io_free_req(req);
+			__io_free_req(shadow);
 			io_cqring_add_event(ctx, s->sqe->user_data, ret);
 			return 0;
 		}
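For a drained link, submission allocates an extra "shadow" request that marks the drain point in the queue. On this early-error path only the head request was freed, so the shadow leaked; the added __io_free_req(shadow) releases it too. The shape of the bug, reduced to a compilable sketch with a crude live-allocation counter (all names are illustrative):

#include <stdio.h>
#include <stdlib.h>

static int live;	/* crude allocation counter to make leaks visible */

static void *req_alloc(void) { live++; return malloc(16); }
static void req_free(void *req) { live--; free(req); }

static int queue_link_head(void *head, void *shadow, int err)
{
	if (err) {
		req_free(head);
		req_free(shadow);	/* the fix: drop the shadow here too */
		return err;
	}
	/* success path would hand both off to the ring; freed here only to
	 * keep the sketch itself leak-free */
	req_free(head);
	req_free(shadow);
	return 0;
}

int main(void)
{
	void *head = req_alloc();
	void *shadow = req_alloc();	/* drain marker for the link */

	queue_link_head(head, shadow, -1);
	printf("live after error path: %d (0 means no leak)\n", live);
	return 0;
}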
@@ -3828,10 +3830,6 @@ static int io_uring_create(unsigned entries, struct io_uring_params *p)
 	if (ret)
 		goto err;
 
-	ret = io_uring_get_fd(ctx);
-	if (ret < 0)
-		goto err;
-
 	memset(&p->sq_off, 0, sizeof(p->sq_off));
 	p->sq_off.head = offsetof(struct io_rings, sq.head);
 	p->sq_off.tail = offsetof(struct io_rings, sq.tail);
@@ -3849,6 +3847,14 @@ static int io_uring_create(unsigned entries, struct io_uring_params *p)
 	p->cq_off.overflow = offsetof(struct io_rings, cq_overflow);
 	p->cq_off.cqes = offsetof(struct io_rings, cqes);
 
+	/*
+	 * Install ring fd as the very last thing, so we don't risk someone
+	 * having closed it before we finish setup
+	 */
+	ret = io_uring_get_fd(ctx);
+	if (ret < 0)
+		goto err;
+
 	p->features = IORING_FEAT_SINGLE_MMAP;
 	return ret;
 err:
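These last two hunks reorder setup: the moment io_uring_get_fd() installs the ring fd, another thread of the caller can close() it, dropping the last reference and freeing the ctx, so every remaining access to ctx (including the sq_off/cq_off fills) must happen first. The same publish-last discipline in a compilable userspace analogy, where storing a visible pointer plays the role of fd_install() (all names are ours, not kernel API); build with -pthread:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct toy_ctx { int ready; };

/* the "fd table": once a pointer lands here, the other thread owns it */
static _Atomic(struct toy_ctx *) published;

static void *closer(void *arg)
{
	struct toy_ctx *c;

	(void)arg;
	while ((c = atomic_load(&published)) == NULL)
		;		/* spin until setup publishes the ctx */
	free(c);		/* userspace stand-in for close() freeing it */
	return NULL;
}

int main(void)
{
	pthread_t t;
	struct toy_ctx *c;

	pthread_create(&t, NULL, closer, NULL);

	c = calloc(1, sizeof(*c));
	if (!c)
		return 1;
	c->ready = 1;			/* finish ALL setup first ...    */
	atomic_store(&published, c);	/* ... then publish (fd_install) */
	/* touching c past this point would race with free() in closer() */

	pthread_join(t, NULL);
	return 0;
}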