Merge branch 'io_uring-5.8' into for-5.9/io_uring

Merge in changes that went into 5.8-rc3. Git will do the merge
silently, but we still need a tweak on top of it, since
io_complete_rw_common() was modified to take an io_comp_state pointer.
The auto-merge doesn't account for that, and we end up with something
that doesn't compile.
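
For reference, a sketch of the mismatch. The new prototype is the one
added in the first hunk below; the old 5.8-side prototype is written
from memory here and is only illustrative:

    /* 5.8 side: the completion helper took just the kiocb and result */
    static void io_complete_rw_common(struct kiocb *kiocb, long res);

    /* for-5.9 side: it now also takes batched-completion state */
    static void io_complete_rw_common(struct kiocb *kiocb, long res,
                                      struct io_comp_state *cs);

The auto-merged call site in io_iopoll_queue() still passes two
arguments, hence the build break; the fixup passes NULL for the
io_comp_state, since there is no completion batch on that path.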

* io_uring-5.8:
  io_uring: fix current->mm NULL dereference on exit
  io_uring: fix hanging iopoll in case of -EAGAIN
  io_uring: fix io_sq_thread no schedule when busy

Signed-off-by: Jens Axboe <axboe@kernel.dk>
commit 2237d76530
Author: Jens Axboe <axboe@kernel.dk>
Date:   2020-06-26 13:44:16 -06:00

@@ -905,6 +905,8 @@ static int __io_sqe_files_update(struct io_ring_ctx *ctx,
 					 struct io_uring_files_update *ip,
 					 unsigned nr_args);
 static int io_grab_files(struct io_kiocb *req);
+static void io_complete_rw_common(struct kiocb *kiocb, long res,
+				  struct io_comp_state *cs);
 static void io_cleanup_req(struct io_kiocb *req);
 static int io_file_get(struct io_submit_state *state, struct io_kiocb *req,
 		       int fd, struct file **out_file, bool fixed);
@@ -1897,6 +1899,14 @@ static void io_iopoll_queue(struct list_head *again)
 	do {
 		req = list_first_entry(again, struct io_kiocb, list);
 		list_del(&req->list);
+
+		/* shouldn't happen unless io_uring is dying, cancel reqs */
+		if (unlikely(!current->mm)) {
+			io_complete_rw_common(&req->rw.kiocb, -EAGAIN, NULL);
+			io_put_req(req);
+			continue;
+		}
+
 		refcount_inc(&req->refs);
 		io_queue_async_work(req);
 	} while (!list_empty(again));
@@ -2221,10 +2231,8 @@ static void io_complete_rw_iopoll(struct kiocb *kiocb, long res, long res2)
 
 	WRITE_ONCE(req->result, res);
 	/* order with io_poll_complete() checking ->result */
-	if (res != -EAGAIN) {
-		smp_wmb();
-		WRITE_ONCE(req->iopoll_completed, 1);
-	}
+	smp_wmb();
+	WRITE_ONCE(req->iopoll_completed, 1);
 }
 
 /*
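
The write barrier above pairs with a read barrier on the reaping side.
A minimal sketch of the publish/consume pattern, with the reader side
simplified from (not copied verbatim out of) the iopoll reaping code:

	/* completion side: publish ->result before ->iopoll_completed */
	WRITE_ONCE(req->result, res);
	smp_wmb();
	WRITE_ONCE(req->iopoll_completed, 1);

	/* reaping side: once the flag is seen, ->result is safe to read */
	if (READ_ONCE(req->iopoll_completed)) {
		smp_rmb();
		res = READ_ONCE(req->result);
	}

Dropping the -EAGAIN special case means the request is always marked
completed here; -EAGAIN resubmission is instead handled via the "again"
list in io_iopoll_queue(), as in the hunk above.
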
@@ -5680,9 +5688,6 @@ static int io_issue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
 	if ((ctx->flags & IORING_SETUP_IOPOLL) && req->file) {
 		const bool in_async = io_wq_current_is_worker();
 
-		if (req->result == -EAGAIN)
-			return -EAGAIN;
-
 		/* workqueue context doesn't hold uring_lock, grab it now */
 		if (in_async)
 			mutex_lock(&ctx->uring_lock);
@@ -6341,7 +6346,7 @@ static int io_sq_thread(void *data)
 		 * If submit got -EBUSY, flag us as needing the application
 		 * to enter the kernel to reap and flush events.
 		 */
-		if (!to_submit || ret == -EBUSY) {
+		if (!to_submit || ret == -EBUSY || need_resched()) {
 			/*
 			 * Drop cur_mm before scheduling, we can't hold it for
 			 * long periods (or over schedule()). Do this before
@@ -6357,7 +6362,7 @@ static int io_sq_thread(void *data)
 			 * more IO, we should wait for the application to
 			 * reap events and wake us up.
 			 */
-			if (!list_empty(&ctx->poll_list) ||
+			if (!list_empty(&ctx->poll_list) || need_resched() ||
 			    (!time_after(jiffies, timeout) && ret != -EBUSY &&
 			     !percpu_ref_is_dying(&ctx->refs))) {
 				if (current->task_works)
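
Taken together, the two need_resched() checks make a busy SQPOLL thread
fall into its wait/reschedule path instead of spinning in the submit
loop. A condensed, illustrative sketch of the resulting control flow
(not the verbatim kernel loop; io_sq_thread_drop_mm() is a stand-in for
the actual cur_mm dropping code, and the io_submit_sqes() call follows
the 5.8-era argument order):

	while (!kthread_should_park()) {
		unsigned int to_submit = io_sqring_entries(ctx);

		if (!to_submit || ret == -EBUSY || need_resched()) {
			/* can't hold the mm over schedule() */
			io_sq_thread_drop_mm();	/* illustrative stand-in */
			cond_resched();
			continue;
		}

		ret = io_submit_sqes(ctx, to_submit, NULL, -1);
	}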