From 1bacd264d3c3a05de4afdd1712c9dd6ccebb9490 Mon Sep 17 00:00:00 2001
From: Jens Axboe
Date: Mon, 20 Jun 2022 06:39:27 -0600
Subject: [PATCH 1/8] io_uring: mark reissue requests with REQ_F_PARTIAL_IO

If we mark for reissue, we assume that the buffer will remain stable.
Hence if we are using a provided buffer, we need to ensure that we stick
with it for the duration of that request. This only affects block
devices that use provided buffers, as those are the only ones that get
marked with REQ_F_REISSUE.

Signed-off-by: Jens Axboe
---
 fs/io_uring.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index d3ee4fc532fa..87c65a358678 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -3437,7 +3437,7 @@ static bool __io_complete_rw_common(struct io_kiocb *req, long res)
 	if (unlikely(res != req->cqe.res)) {
 		if ((res == -EAGAIN || res == -EOPNOTSUPP) &&
 		    io_rw_should_reissue(req)) {
-			req->flags |= REQ_F_REISSUE;
+			req->flags |= REQ_F_REISSUE | REQ_F_PARTIAL_IO;
 			return true;
 		}
 		req_set_fail(req);
@@ -3487,7 +3487,7 @@ static void io_complete_rw_iopoll(struct kiocb *kiocb, long res)
 		kiocb_end_write(req);
 	if (unlikely(res != req->cqe.res)) {
 		if (res == -EAGAIN && io_rw_should_reissue(req)) {
-			req->flags |= REQ_F_REISSUE;
+			req->flags |= REQ_F_REISSUE | REQ_F_PARTIAL_IO;
 			return;
 		}
 		req->cqe.res = res;

From b60cac14bb3c88cff2a7088d9095b01a80938c41 Mon Sep 17 00:00:00 2001
From: Jens Axboe
Date: Tue, 21 Jun 2022 07:47:13 -0600
Subject: [PATCH 2/8] io_uring: fix merge error in checking send/recv addr2 flags

With the dropping of the IOPOLL checking in the per-opcode handlers, we
inadvertently left two checks in the recv/recvmsg and send/sendmsg prep
handlers for the same thing, and one of them includes addr2, which holds
the flags for these opcodes. Fix it up and kill the redundant checks.

Signed-off-by: Jens Axboe
---
 fs/io_uring.c | 4 ----
 1 file changed, 4 deletions(-)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index 87c65a358678..05508fe92b9c 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -6077,8 +6077,6 @@ static int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 
 	if (unlikely(sqe->file_index))
 		return -EINVAL;
-	if (unlikely(sqe->addr2 || sqe->file_index))
-		return -EINVAL;
 
 	sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
 	sr->len = READ_ONCE(sqe->len);
@@ -6315,8 +6313,6 @@ static int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 
 	if (unlikely(sqe->file_index))
 		return -EINVAL;
-	if (unlikely(sqe->addr2 || sqe->file_index))
-		return -EINVAL;
 
 	sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
 	sr->len = READ_ONCE(sqe->len);

From aacf2f9f382c91df73f33317e28a4c34c8038986 Mon Sep 17 00:00:00 2001
From: Pavel Begunkov
Date: Tue, 21 Jun 2022 13:25:06 +0100
Subject: [PATCH 3/8] io_uring: fix req->apoll_events

apoll_events should be set once at the beginning of poll arming, just as
poll->events is, and never change afterwards. However, io_uring currently
resets it on each __io_poll_execute() for no clear reason. There is also
a place in __io_arm_poll_handler() where we add EPOLLONESHOT to downgrade
a multishot, but forget to do the same thing with ->apoll_events, which
is buggy.
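For illustration only, here is a minimal userspace sketch of the invariant
this patch restores; struct poll_sketch, arm_poll_sketch() and
multishot_failed are hypothetical stand-ins for the kernel's poll/apoll
state, not the real io_uring code:

	#include <stdbool.h>
	#include <sys/epoll.h>

	/* hypothetical stand-ins for poll->events and req->apoll_events */
	struct poll_sketch {
		unsigned int events;
		unsigned int apoll_events;
	};

	static void arm_poll_sketch(struct poll_sketch *p, unsigned int mask,
				    bool multishot_failed)
	{
		p->events = mask;
		/* set once at arm time, alongside poll->events ... */
		p->apoll_events = p->events;

		if (multishot_failed) {
			/* ... and a oneshot downgrade must hit both masks */
			p->events |= EPOLLONESHOT;
			p->apoll_events |= EPOLLONESHOT;
		}
		/* neither mask is reassigned later in the execute path */
	}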
Fixes: 81459350d581e ("io_uring: cache req->apoll->events in req->cflags")
Signed-off-by: Pavel Begunkov
Reviewed-by: Hao Xu
Link: https://lore.kernel.org/r/0aef40399ba75b1a4d2c2e85e6e8fd93c02fc6e4.1655814213.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe
---
 fs/io_uring.c | 12 ++++++++----
 1 file changed, 8 insertions(+), 4 deletions(-)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index 05508fe92b9c..dffa85d4dc7a 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -6950,7 +6950,8 @@ static void io_apoll_task_func(struct io_kiocb *req, bool *locked)
 		io_req_complete_failed(req, ret);
 }
 
-static void __io_poll_execute(struct io_kiocb *req, int mask, __poll_t events)
+static void __io_poll_execute(struct io_kiocb *req, int mask,
+			      __poll_t __maybe_unused events)
 {
 	req->cqe.res = mask;
 	/*
@@ -6959,7 +6960,6 @@ static void __io_poll_execute(struct io_kiocb *req, int mask, __poll_t events)
 	 * CPU. We want to avoid pulling in req->apoll->events for that
 	 * case.
 	 */
-	req->apoll_events = events;
 	if (req->opcode == IORING_OP_POLL_ADD)
 		req->io_task_work.func = io_poll_task_func;
 	else
@@ -7110,6 +7110,8 @@ static int __io_arm_poll_handler(struct io_kiocb *req,
 	io_init_poll_iocb(poll, mask, io_poll_wake);
 	poll->file = req->file;
 
+	req->apoll_events = poll->events;
+
 	ipt->pt._key = mask;
 	ipt->req = req;
 	ipt->error = 0;
@@ -7140,8 +7142,10 @@ static int __io_arm_poll_handler(struct io_kiocb *req,
 
 	if (mask) {
 		/* can't multishot if failed, just queue the event we've got */
-		if (unlikely(ipt->error || !ipt->nr_entries))
+		if (unlikely(ipt->error || !ipt->nr_entries)) {
 			poll->events |= EPOLLONESHOT;
+			req->apoll_events |= EPOLLONESHOT;
+		}
 		__io_poll_execute(req, mask, poll->events);
 		return 0;
 	}
@@ -7388,7 +7392,7 @@ static int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 		return -EINVAL;
 
 	io_req_set_refcount(req);
-	req->apoll_events = poll->events = io_poll_parse_events(sqe, flags);
+	poll->events = io_poll_parse_events(sqe, flags);
 	return 0;
 }

From c487a5ad48831afa6784b368ec40d0ee50f2fe1b Mon Sep 17 00:00:00 2001
From: Pavel Begunkov
Date: Wed, 22 Jun 2022 00:00:35 +0100
Subject: [PATCH 4/8] io_uring: fail links when poll fails

Don't forget to cancel all linked requests of a poll request when
__io_arm_poll_handler() fails.

Fixes: aa43477b04025 ("io_uring: poll rework")
Signed-off-by: Pavel Begunkov
Link: https://lore.kernel.org/r/a78aad962460f9fdfe4aa4c0b62425c88f9415bc.1655852245.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe
---
 fs/io_uring.c | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index dffa85d4dc7a..d5ea3c6167b5 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -7405,6 +7405,8 @@ static int io_poll_add(struct io_kiocb *req, unsigned int issue_flags)
 	ipt.pt._qproc = io_poll_queue_proc;
 
 	ret = __io_arm_poll_handler(req, &req->poll, &ipt, poll->events);
+	if (!ret && ipt.error)
+		req_set_fail(req);
 	ret = ret ?: ipt.error;
 	if (ret)
 		__io_req_complete(req, issue_flags, ret, 0);

From 9d2ad2947a53abf5e5e6527a9eeed50a3a4cbc72 Mon Sep 17 00:00:00 2001
From: Pavel Begunkov
Date: Wed, 22 Jun 2022 00:00:36 +0100
Subject: [PATCH 5/8] io_uring: fix wrong arm_poll error handling

Leaving ipt->error set when a request was punted to task_work execution
is problematic; don't forget to clear it.
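For illustration only, a condensed userspace sketch of the tail of
__io_arm_poll_handler() as patches 4 and 5 leave it; struct ipt_sketch and
poll_execute_sketch() are simplified stand-ins, not the kernel definitions:

	/* simplified stand-in for struct io_poll_table */
	struct ipt_sketch {
		int nr_entries;
		int error;
	};

	/* hypothetical stand-in for __io_poll_execute() */
	static void poll_execute_sketch(int mask)
	{
		(void)mask;
	}

	static int arm_poll_tail_sketch(struct ipt_sketch *ipt, int mask)
	{
		if (mask) {
			if (ipt->error || !ipt->nr_entries) {
				/* we queue the one event we already have,
				 * so the error is handled right here and
				 * must not leak back to the caller */
				ipt->error = 0;
			}
			poll_execute_sketch(mask);
			return 0;
		}
		/* punted to wakeups: a set ipt->error is a genuine arming
		 * failure, which the caller turns into a failed request */
		return ipt->error;
	}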
Fixes: aa43477b04025 ("io_uring: poll rework")
Signed-off-by: Pavel Begunkov
Link: https://lore.kernel.org/r/a6c84ef4182c6962380aebe11b35bdcb25b0ccfb.1655852245.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe
---
 fs/io_uring.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index d5ea3c6167b5..cb719a53b8bd 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -7145,6 +7145,7 @@ static int __io_arm_poll_handler(struct io_kiocb *req,
 		if (unlikely(ipt->error || !ipt->nr_entries)) {
 			poll->events |= EPOLLONESHOT;
 			req->apoll_events |= EPOLLONESHOT;
+			ipt->error = 0;
 		}
 		__io_poll_execute(req, mask, poll->events);
 		return 0;

From c0737fa9a5a5cf5a053bcc983f72d58919b997c6 Mon Sep 17 00:00:00 2001
From: Pavel Begunkov
Date: Wed, 22 Jun 2022 00:00:37 +0100
Subject: [PATCH 6/8] io_uring: fix double poll leak on repolling

We have re-polling for partial IO, so a request can be polled twice. If
it used two poll entries the first time, then on the second
io_arm_poll_handler() it will find the old apoll entry and NULL its
kmalloc()'ed second entry, i.e. apoll->double_poll, thus leaking it.

Fixes: 10c873334feba ("io_uring: allow re-poll if we made progress")
Signed-off-by: Pavel Begunkov
Link: https://lore.kernel.org/r/fee2452494222ecc7f1f88c8fb659baef971414a.1655852245.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe
---
 fs/io_uring.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index cb719a53b8bd..5c95755619e2 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -7208,6 +7208,7 @@ static int io_arm_poll_handler(struct io_kiocb *req, unsigned issue_flags)
 		mask |= EPOLLEXCLUSIVE;
 	if (req->flags & REQ_F_POLLED) {
 		apoll = req->apoll;
+		kfree(apoll->double_poll);
 	} else if (!(issue_flags & IO_URING_F_UNLOCKED) &&
 		   !list_empty(&ctx->apoll_cache)) {
 		apoll = list_first_entry(&ctx->apoll_cache, struct async_poll,

From e70b64a3f28b9f54602ae3e706b1dc1338de3df7 Mon Sep 17 00:00:00 2001
From: Dylan Yudaken
Date: Thu, 23 Jun 2022 01:37:43 -0700
Subject: [PATCH 7/8] io_uring: move io_uring_get_opcode out of TP_printk

The TP_printk macros are not supposed to use custom code ([1]), or else
tools such as perf cannot use these events. Convert the opcode string
representation to use the __string wiring that the event framework
provides ([2]).
[1]: https://lwn.net/Articles/379903/
[2]: https://lwn.net/Articles/381064/

Fixes: 033b87d24f72 ("io_uring: use the text representation of ops in trace")
Signed-off-by: Dylan Yudaken
Link: https://lore.kernel.org/r/20220623083743.2648321-1-dylany@fb.com
[axboe: fixup spurious removal of sq_thread assignment]
Signed-off-by: Jens Axboe
---
 include/trace/events/io_uring.h | 42 +++++++++++++++++++++++++++------
 1 file changed, 35 insertions(+), 7 deletions(-)

diff --git a/include/trace/events/io_uring.h b/include/trace/events/io_uring.h
index 66fcc5a1a5b1..aa2f951b07cd 100644
--- a/include/trace/events/io_uring.h
+++ b/include/trace/events/io_uring.h
@@ -158,6 +158,8 @@ TRACE_EVENT(io_uring_queue_async_work,
 		__field( unsigned int, flags )
 		__field( struct io_wq_work *, work )
 		__field( int, rw )
+
+		__string( op_str, io_uring_get_opcode(opcode) )
 	),
 
 	TP_fast_assign(
@@ -168,11 +170,13 @@ TRACE_EVENT(io_uring_queue_async_work,
 		__entry->opcode = opcode;
 		__entry->work = work;
 		__entry->rw = rw;
+
+		__assign_str(op_str, io_uring_get_opcode(opcode));
 	),
 
 	TP_printk("ring %p, request %p, user_data 0x%llx, opcode %s, flags 0x%x, %s queue, work %p",
 		__entry->ctx, __entry->req, __entry->user_data,
-		io_uring_get_opcode(__entry->opcode),
+		__get_str(op_str),
 		__entry->flags, __entry->rw ? "hashed" : "normal", __entry->work)
 );
@@ -198,6 +202,8 @@ TRACE_EVENT(io_uring_defer,
 		__field( void *, req )
 		__field( unsigned long long, data )
 		__field( u8, opcode )
+
+		__string( op_str, io_uring_get_opcode(opcode) )
 	),
 
 	TP_fast_assign(
@@ -205,11 +211,13 @@ TRACE_EVENT(io_uring_defer,
 		__entry->req = req;
 		__entry->data = user_data;
 		__entry->opcode = opcode;
+
+		__assign_str(op_str, io_uring_get_opcode(opcode));
 	),
 
 	TP_printk("ring %p, request %p, user_data 0x%llx, opcode %s",
 		__entry->ctx, __entry->req, __entry->data,
-		io_uring_get_opcode(__entry->opcode))
+		__get_str(op_str))
 );
@@ -298,6 +306,8 @@ TRACE_EVENT(io_uring_fail_link,
 		__field( unsigned long long, user_data )
 		__field( u8, opcode )
 		__field( void *, link )
+
+		__string( op_str, io_uring_get_opcode(opcode) )
 	),
 
 	TP_fast_assign(
@@ -306,11 +316,13 @@ TRACE_EVENT(io_uring_fail_link,
 		__entry->user_data = user_data;
 		__entry->opcode = opcode;
 		__entry->link = link;
+
+		__assign_str(op_str, io_uring_get_opcode(opcode));
 	),
 
 	TP_printk("ring %p, request %p, user_data 0x%llx, opcode %s, link %p",
 		__entry->ctx, __entry->req, __entry->user_data,
-		io_uring_get_opcode(__entry->opcode), __entry->link)
+		__get_str(op_str), __entry->link)
 );
@@ -390,6 +402,8 @@ TRACE_EVENT(io_uring_submit_sqe,
 		__field( u32, flags )
 		__field( bool, force_nonblock )
 		__field( bool, sq_thread )
+
+		__string( op_str, io_uring_get_opcode(opcode) )
 	),
 
 	TP_fast_assign(
@@ -400,11 +414,13 @@ TRACE_EVENT(io_uring_submit_sqe,
 		__entry->flags = flags;
 		__entry->force_nonblock = force_nonblock;
 		__entry->sq_thread = sq_thread;
+
+		__assign_str(op_str, io_uring_get_opcode(opcode));
 	),
 
 	TP_printk("ring %p, req %p, user_data 0x%llx, opcode %s, flags 0x%x, "
 		  "non block %d, sq_thread %d", __entry->ctx, __entry->req,
-		  __entry->user_data, io_uring_get_opcode(__entry->opcode),
+		  __entry->user_data, __get_str(op_str),
 		  __entry->flags, __entry->force_nonblock, __entry->sq_thread)
 );
@@ -435,6 +451,8 @@ TRACE_EVENT(io_uring_poll_arm,
 		__field( u8, opcode )
 		__field( int, mask )
 		__field( int, events )
+
+		__string( op_str, io_uring_get_opcode(opcode) )
 	),
 
 	TP_fast_assign(
@@ -444,11 +462,13 @@ TRACE_EVENT(io_uring_poll_arm,
 		__entry->opcode = opcode;
 		__entry->mask = mask;
 		__entry->events = events;
+
+		__assign_str(op_str, io_uring_get_opcode(opcode));
 	),
 
 	TP_printk("ring %p, req %p, user_data 0x%llx, opcode %s, mask 0x%x, events 0x%x",
 		  __entry->ctx, __entry->req, __entry->user_data,
-		  io_uring_get_opcode(__entry->opcode),
+		  __get_str(op_str),
 		  __entry->mask, __entry->events)
 );
@@ -474,6 +494,8 @@ TRACE_EVENT(io_uring_task_add,
 		__field( unsigned long long, user_data )
 		__field( u8, opcode )
 		__field( int, mask )
+
+		__string( op_str, io_uring_get_opcode(opcode) )
 	),
 
 	TP_fast_assign(
@@ -482,11 +504,13 @@ TRACE_EVENT(io_uring_task_add,
 		__entry->user_data = user_data;
 		__entry->opcode = opcode;
 		__entry->mask = mask;
+
+		__assign_str(op_str, io_uring_get_opcode(opcode));
 	),
 
 	TP_printk("ring %p, req %p, user_data 0x%llx, opcode %s, mask %x",
 		  __entry->ctx, __entry->req, __entry->user_data,
-		  io_uring_get_opcode(__entry->opcode),
+		  __get_str(op_str),
 		  __entry->mask)
 );
@@ -523,6 +547,8 @@ TRACE_EVENT(io_uring_req_failed,
 		__field( u64, pad1 )
 		__field( u64, addr3 )
 		__field( int, error )
+
+		__string( op_str, io_uring_get_opcode(sqe->opcode) )
 	),
 
 	TP_fast_assign(
@@ -542,6 +568,8 @@ TRACE_EVENT(io_uring_req_failed,
 		__entry->pad1 = sqe->__pad2[0];
 		__entry->addr3 = sqe->addr3;
 		__entry->error = error;
+
+		__assign_str(op_str, io_uring_get_opcode(sqe->opcode));
 	),
 
 	TP_printk("ring %p, req %p, user_data 0x%llx, "
@@ -550,7 +578,7 @@ TRACE_EVENT(io_uring_req_failed,
 		  "personality=%d, file_index=%d, pad=0x%llx, addr3=%llx, "
 		  "error=%d",
 		  __entry->ctx, __entry->req, __entry->user_data,
-		  io_uring_get_opcode(__entry->opcode),
+		  __get_str(op_str),
 		  __entry->flags, __entry->ioprio,
 		  (unsigned long long)__entry->off,
 		  (unsigned long long) __entry->addr, __entry->len,

From 386e4fb6962b9f248a80f8870aea0870ca603e89 Mon Sep 17 00:00:00 2001
From: Jens Axboe
Date: Thu, 23 Jun 2022 11:06:43 -0600
Subject: [PATCH 8/8] io_uring: use original request task for inflight tracking

In prior kernels, we did file assignment always at prep time. This meant
that req->task == current. But after deferring that assignment and then
pushing the inflight tracking back in, we've got the inflight tracking
using current when it should in fact now be using req->task.

Fix up that error, introduced by adding the inflight tracking back after
file assignments got modified.

Fixes: 9cae36a094e7 ("io_uring: reinstate the inflight tracking")
Signed-off-by: Jens Axboe
---
 fs/io_uring.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index 5c95755619e2..5ff2cdb425bc 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1975,7 +1975,7 @@ static inline void io_req_track_inflight(struct io_kiocb *req)
 {
 	if (!(req->flags & REQ_F_INFLIGHT)) {
 		req->flags |= REQ_F_INFLIGHT;
-		atomic_inc(&current->io_uring->inflight_tracked);
+		atomic_inc(&req->task->io_uring->inflight_tracked);
 	}
 }
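For illustration only, a small userspace sketch of the accounting rule
patch 8 enforces; the types, flag, and helper are hypothetical stand-ins
for the kernel's structures, not the real io_uring code:

	#include <stdatomic.h>

	struct task_sketch {
		atomic_int inflight_tracked;
	};

	struct req_sketch {
		unsigned int flags;
		struct task_sketch *task;	/* task that owns the request */
	};

	#define REQ_F_INFLIGHT_SKETCH	(1U << 0)

	static void track_inflight_sketch(struct req_sketch *req)
	{
		if (!(req->flags & REQ_F_INFLIGHT_SKETCH)) {
			req->flags |= REQ_F_INFLIGHT_SKETCH;
			/* charge the task that owns the request, not
			 * whichever task happens to run this code */
			atomic_fetch_add(&req->task->inflight_tracked, 1);
		}
	}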