commit b88025ea47
Merge tag 'io_uring-5.6-2020-02-22' of git://git.kernel.dk/linux-block

Pull io_uring fixes from Jens Axboe:
 "Here's a small collection of fixes that were queued up:

   - Remove unnecessary NULL check (Dan)

   - Missing io_req_cancelled() call in fallocate (Pavel)

   - Put the cleanup check for aux data in the right spot (Pavel)

   - Two fixes for SQPOLL (Stefano, Xiaoguang)"

* tag 'io_uring-5.6-2020-02-22' of git://git.kernel.dk/linux-block:
  io_uring: fix __io_iopoll_check deadlock in io_sq_thread
  io_uring: prevent sq_thread from spinning when it should stop
  io_uring: fix use-after-free by io_cleanup_req()
  io_uring: remove unnecessary NULL checks
  io_uring: add missing io_req_cancelled()
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1260,6 +1260,9 @@ static void __io_req_aux_free(struct io_kiocb *req)
 {
 	struct io_ring_ctx *ctx = req->ctx;
 
+	if (req->flags & REQ_F_NEED_CLEANUP)
+		io_cleanup_req(req);
+
 	kfree(req->io);
 	if (req->file) {
 		if (req->flags & REQ_F_FIXED_FILE)
@@ -1275,9 +1278,6 @@ static void __io_free_req(struct io_kiocb *req)
 {
 	__io_req_aux_free(req);
 
-	if (req->flags & REQ_F_NEED_CLEANUP)
-		io_cleanup_req(req);
-
 	if (req->flags & REQ_F_INFLIGHT) {
 		struct io_ring_ctx *ctx = req->ctx;
 		unsigned long flags;
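These two hunks are the use-after-free fix. __io_req_aux_free() frees req->io, but the old __io_free_req() only ran io_cleanup_req() after that helper returned, by which point the data io_cleanup_req() inspects was already gone. Hoisting the REQ_F_NEED_CLEANUP check ahead of kfree(req->io), inside the shared helper, also covers every other caller of __io_req_aux_free(). Below is a minimal user-space sketch of the shape of the fix; all names are invented and it is not the kernel code:

#include <stdlib.h>

struct request {
	int need_cleanup;		/* stands in for REQ_F_NEED_CLEANUP */
	struct { void *iov; } *io;	/* stands in for req->io */
};

static void cleanup_req(struct request *req)
{
	free(req->io->iov);	/* touches req->io: must precede free(req->io) */
	req->need_cleanup = 0;
}

/* Shared teardown helper: cleanup runs here, while req->io is alive. */
static void req_aux_free(struct request *req)
{
	if (req->need_cleanup)
		cleanup_req(req);
	free(req->io);
}

static void free_req(struct request *req)
{
	req_aux_free(req);
	/* the old code did the cleanup check here, after req->io was freed */
	free(req);
}

int main(void)
{
	struct request *req = calloc(1, sizeof(*req));

	req->io = calloc(1, sizeof(*req->io));
	req->io->iov = malloc(16);
	req->need_cleanup = 1;
	free_req(req);		/* no use-after-free with the hoisted cleanup */
	return 0;
}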
@@ -1672,11 +1672,17 @@ static void io_iopoll_reap_events(struct io_ring_ctx *ctx)
 	mutex_unlock(&ctx->uring_lock);
 }
 
-static int __io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events,
-			     long min)
+static int io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events,
+			   long min)
 {
 	int iters = 0, ret = 0;
 
+	/*
+	 * We disallow the app entering submit/complete with polling, but we
+	 * still need to lock the ring to prevent racing with polled issue
+	 * that got punted to a workqueue.
+	 */
+	mutex_lock(&ctx->uring_lock);
 	do {
 		int tmin = 0;
 
@@ -1712,21 +1718,6 @@ static int __io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events,
 		ret = 0;
 	} while (min && !*nr_events && !need_resched());
 
-	return ret;
-}
-
-static int io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events,
-			   long min)
-{
-	int ret;
-
-	/*
-	 * We disallow the app entering submit/complete with polling, but we
-	 * still need to lock the ring to prevent racing with polled issue
-	 * that got punted to a workqueue.
-	 */
-	mutex_lock(&ctx->uring_lock);
-	ret = __io_iopoll_check(ctx, nr_events, min);
 	mutex_unlock(&ctx->uring_lock);
 	return ret;
 }
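The wrapper pair collapses here: __io_iopoll_check() is renamed io_iopoll_check() and takes uring_lock itself, so the polling loop can no longer be reached without the ring locked. A user-space pthreads sketch of the resulting shape, with invented names:

#include <pthread.h>

static pthread_mutex_t ring_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned completed;

/* one reap pass; caller must hold ring_lock */
static int poll_once(unsigned *nr_events)
{
	*nr_events += completed;
	completed = 0;
	return 0;
}

int iopoll_check(unsigned *nr_events, long min)
{
	int ret;

	pthread_mutex_lock(&ring_lock);		/* the lock now lives here */
	do {
		ret = poll_once(nr_events);
	} while (!ret && min && *nr_events < (unsigned)min);
	pthread_mutex_unlock(&ring_lock);
	return ret;
}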
@@ -2517,6 +2508,9 @@ static void io_fallocate_finish(struct io_wq_work **workptr)
 	struct io_kiocb *nxt = NULL;
 	int ret;
 
+	if (io_req_cancelled(req))
+		return;
+
 	ret = vfs_fallocate(req->file, req->sync.mode, req->sync.off,
 				req->sync.len);
 	if (ret < 0)
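This is the fallocate fix from the pull message: the handler runs after the request was punted to a worker, and it previously issued vfs_fallocate() even when the request had already been cancelled. The general pattern, in a user-space sketch with invented names:

#include <stdatomic.h>
#include <stdbool.h>

struct work_req {
	atomic_bool cancelled;
};

static bool req_cancelled(struct work_req *req)
{
	return atomic_load(&req->cancelled);
}

static int do_fallocate_stub(struct work_req *req)
{
	(void)req;
	return 0;	/* stands in for the vfs_fallocate() call above */
}

static void fallocate_finish(struct work_req *req)
{
	if (req_cancelled(req))
		return;			/* the check this hunk adds */
	do_fallocate_stub(req);		/* side effect only if still wanted */
}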
@@ -2904,6 +2898,7 @@ static void io_close_finish(struct io_wq_work **workptr)
 	struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);
 	struct io_kiocb *nxt = NULL;
 
+	/* not cancellable, don't do io_req_cancelled() */
 	__io_close_finish(req, &nxt);
 	if (nxt)
 		io_wq_assign_next(workptr, nxt);
@@ -3071,7 +3066,7 @@ static int io_sendmsg(struct io_kiocb *req, struct io_kiocb **nxt,
 		if (req->io)
 			return -EAGAIN;
 		if (io_alloc_async_ctx(req)) {
-			if (kmsg && kmsg->iov != kmsg->fast_iov)
+			if (kmsg->iov != kmsg->fast_iov)
 				kfree(kmsg->iov);
 			return -ENOMEM;
 		}
@@ -3225,7 +3220,7 @@ static int io_recvmsg(struct io_kiocb *req, struct io_kiocb **nxt,
 		if (req->io)
 			return -EAGAIN;
 		if (io_alloc_async_ctx(req)) {
-			if (kmsg && kmsg->iov != kmsg->fast_iov)
+			if (kmsg->iov != kmsg->fast_iov)
 				kfree(kmsg->iov);
 			return -ENOMEM;
 		}
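Both hunks drop the same dead test: by the time this error path runs, kmsg has already been pointed at either the prepared async context or a stack-local fallback, so it cannot be NULL, and the extra guard only obscured that invariant. A user-space sketch of why such a check is dead, with invented names:

#include <stddef.h>

struct msg_state { void *iov; void *fast_iov; };

int sendmsg_step(struct msg_state *async_ctx)
{
	struct msg_state on_stack = { NULL, NULL };
	struct msg_state *kmsg;

	if (async_ctx)
		kmsg = async_ctx;	/* prepared earlier */
	else
		kmsg = &on_stack;	/* address of a local: never NULL */

	/* later error paths can test kmsg->iov directly,
	 * without a redundant "kmsg &&" guard */
	return kmsg->iov != kmsg->fast_iov;
}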
@@ -5114,7 +5109,7 @@ static int io_sq_thread(void *data)
 				 */
 				mutex_lock(&ctx->uring_lock);
 				if (!list_empty(&ctx->poll_list))
-					__io_iopoll_check(ctx, &nr_events, 0);
+					io_iopoll_getevents(ctx, &nr_events, 0);
 				else
 					inflight = 0;
 				mutex_unlock(&ctx->uring_lock);
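This is the io_sq_thread() half of the deadlock fix. The thread takes uring_lock two lines above, and io_iopoll_check() now acquires that same mutex internally, so calling it here would self-deadlock; io_iopoll_getevents() is the single reap pass that expects its caller to already hold the lock. The hazard, sketched in user-space terms with invented names:

#include <pthread.h>

static pthread_mutex_t ring_lock = PTHREAD_MUTEX_INITIALIZER;

static void getevents(void)		/* caller holds ring_lock */
{
	/* reap completions; takes no lock itself */
}

void iopoll_check_entry(void)
{
	pthread_mutex_lock(&ring_lock);	/* would block forever if the   */
	getevents();			/* caller already held the lock */
	pthread_mutex_unlock(&ring_lock);
}

void sq_thread_step(void)
{
	pthread_mutex_lock(&ring_lock);
	getevents();	/* correct: the lock-free pass, not the entry point */
	pthread_mutex_unlock(&ring_lock);
}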
@@ -5138,6 +5133,18 @@ static int io_sq_thread(void *data)
 		 * to enter the kernel to reap and flush events.
 		 */
 		if (!to_submit || ret == -EBUSY) {
+			/*
+			 * Drop cur_mm before scheduling, we can't hold it for
+			 * long periods (or over schedule()). Do this before
+			 * adding ourselves to the waitqueue, as the unuse/drop
+			 * may sleep.
+			 */
+			if (cur_mm) {
+				unuse_mm(cur_mm);
+				mmput(cur_mm);
+				cur_mm = NULL;
+			}
+
 			/*
 			 * We're polling. If we're within the defined idle
 			 * period, then let us spin without work before going
@@ -5152,18 +5159,6 @@ static int io_sq_thread(void *data)
 				continue;
 			}
 
-			/*
-			 * Drop cur_mm before scheduling, we can't hold it for
-			 * long periods (or over schedule()). Do this before
-			 * adding ourselves to the waitqueue, as the unuse/drop
-			 * may sleep.
-			 */
-			if (cur_mm) {
-				unuse_mm(cur_mm);
-				mmput(cur_mm);
-				cur_mm = NULL;
-			}
-
 			prepare_to_wait(&ctx->sqo_wait, &wait,
 						TASK_INTERRUPTIBLE);
 
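The last two hunks move the cur_mm drop from just before prepare_to_wait() to the top of the no-work path, so the borrowed mm is released before the thread spins through its idle window, not only when it is about to sleep; per the pull message this is part of preventing the sq_thread from spinning when it should stop. A user-space sketch of the reordered loop, with invented names:

#include <sched.h>
#include <stdbool.h>

struct poller {
	int to_submit;
	bool in_idle_window;
	void *borrowed_mm;	/* stands in for cur_mm */
};

static void drop_mm(struct poller *p)
{
	p->borrowed_mm = NULL;	/* stands in for unuse_mm() + mmput() */
}

static void park_until_woken(struct poller *p) { (void)p; }

void sq_loop_step(struct poller *p)
{
	if (!p->to_submit) {
		drop_mm(p);		/* now released before spinning, too */
		if (p->in_idle_window) {
			sched_yield();	/* spin without work */
			return;
		}
		park_until_woken(p);	/* sleep outside the idle window */
	}
}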