io_uring/kbuf: rename REQ_F_PARTIAL_IO to REQ_F_BL_NO_RECYCLE
We only use the flag for this purpose, so rename it accordingly. This
further prevents various other use cases of it, keeping it clean and
consistent. Then we can also check it in one spot, when a recycle is
attempted, and remove some dead code in io_kbuf_recycle_ring().

Signed-off-by: Jens Axboe <axboe@kernel.dk>
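For reference, this is how the consolidated check in io_kbuf_recycle()
in io_uring/kbuf.h reads after the change, condensed from the diff
below. The final two lines (the io_kbuf_recycle_ring() call and the
trailing return) fall outside the hunk shown and are reconstructed from
the surrounding code, so treat them as an assumption:

	static inline bool io_kbuf_recycle(struct io_kiocb *req, unsigned issue_flags)
	{
		/* requests that already did partial IO never recycle their buffer */
		if (req->flags & REQ_F_BL_NO_RECYCLE)
			return false;
		/* classic (legacy) provided buffers */
		if (req->flags & REQ_F_BUFFER_SELECTED)
			return io_kbuf_recycle_legacy(req, issue_flags);
		/* ring-mapped provided buffers; tail reconstructed, see note above */
		if (req->flags & REQ_F_BUFFER_RING)
			return io_kbuf_recycle_ring(req);
		return false;
	}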
commit 186daf2385
parent 9817ad8589
--- a/include/linux/io_uring_types.h
+++ b/include/linux/io_uring_types.h
@@ -470,7 +470,6 @@ enum {
 	REQ_F_SKIP_LINK_CQES_BIT,
 	REQ_F_SINGLE_POLL_BIT,
 	REQ_F_DOUBLE_POLL_BIT,
-	REQ_F_PARTIAL_IO_BIT,
 	REQ_F_APOLL_MULTISHOT_BIT,
 	REQ_F_CLEAR_POLLIN_BIT,
 	REQ_F_HASH_LOCKED_BIT,
@@ -481,6 +480,7 @@ enum {
 	REQ_F_CANCEL_SEQ_BIT,
 	REQ_F_CAN_POLL_BIT,
 	REQ_F_BL_EMPTY_BIT,
+	REQ_F_BL_NO_RECYCLE_BIT,
 
 	/* not a real bit, just to check we're not overflowing the space */
 	__REQ_F_LAST_BIT,
@@ -543,8 +543,6 @@ enum {
 	REQ_F_SINGLE_POLL	= IO_REQ_FLAG(REQ_F_SINGLE_POLL_BIT),
 	/* double poll may active */
 	REQ_F_DOUBLE_POLL	= IO_REQ_FLAG(REQ_F_DOUBLE_POLL_BIT),
-	/* request has already done partial IO */
-	REQ_F_PARTIAL_IO	= IO_REQ_FLAG(REQ_F_PARTIAL_IO_BIT),
 	/* fast poll multishot mode */
 	REQ_F_APOLL_MULTISHOT	= IO_REQ_FLAG(REQ_F_APOLL_MULTISHOT_BIT),
 	/* recvmsg special flag, clear EPOLLIN */
@@ -559,6 +557,8 @@ enum {
 	REQ_F_CAN_POLL		= IO_REQ_FLAG(REQ_F_CAN_POLL_BIT),
 	/* buffer list was empty after selection of buffer */
 	REQ_F_BL_EMPTY		= IO_REQ_FLAG(REQ_F_BL_EMPTY_BIT),
+	/* don't recycle provided buffers for this request */
+	REQ_F_BL_NO_RECYCLE	= IO_REQ_FLAG(REQ_F_BL_NO_RECYCLE_BIT),
 };
 
 typedef void (*io_req_tw_func_t)(struct io_kiocb *req, struct io_tw_state *ts);
--- a/io_uring/kbuf.c
+++ b/io_uring/kbuf.c
@@ -81,15 +81,6 @@ bool io_kbuf_recycle_legacy(struct io_kiocb *req, unsigned issue_flags)
 	struct io_buffer_list *bl;
 	struct io_buffer *buf;
 
-	/*
-	 * For legacy provided buffer mode, don't recycle if we already did
-	 * IO to this buffer. For ring-mapped provided buffer mode, we should
-	 * increment ring->head to explicitly monopolize the buffer to avoid
-	 * multiple use.
-	 */
-	if (req->flags & REQ_F_PARTIAL_IO)
-		return false;
-
 	io_ring_submit_lock(ctx, issue_flags);
 
 	buf = req->kbuf;
--- a/io_uring/kbuf.h
+++ b/io_uring/kbuf.h
@@ -73,21 +73,9 @@ static inline bool io_kbuf_recycle_ring(struct io_kiocb *req)
 	 * to monopolize the buffer.
 	 */
 	if (req->buf_list) {
-		if (req->flags & REQ_F_PARTIAL_IO) {
-			/*
-			 * If we end up here, then the io_uring_lock has
-			 * been kept held since we retrieved the buffer.
-			 * For the io-wq case, we already cleared
-			 * req->buf_list when the buffer was retrieved,
-			 * hence it cannot be set here for that case.
-			 */
-			req->buf_list->head++;
-			req->buf_list = NULL;
-		} else {
-			req->buf_index = req->buf_list->bgid;
-			req->flags &= ~REQ_F_BUFFER_RING;
-			return true;
-		}
+		req->buf_index = req->buf_list->bgid;
+		req->flags &= ~REQ_F_BUFFER_RING;
+		return true;
 	}
 	return false;
 }
@@ -101,6 +89,8 @@ static inline bool io_do_buffer_select(struct io_kiocb *req)
 
 static inline bool io_kbuf_recycle(struct io_kiocb *req, unsigned issue_flags)
 {
+	if (req->flags & REQ_F_BL_NO_RECYCLE)
+		return false;
 	if (req->flags & REQ_F_BUFFER_SELECTED)
 		return io_kbuf_recycle_legacy(req, issue_flags);
 	if (req->flags & REQ_F_BUFFER_RING)
--- a/io_uring/net.c
+++ b/io_uring/net.c
@@ -456,7 +456,7 @@ int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
 			kmsg->msg.msg_controllen = 0;
 			kmsg->msg.msg_control = NULL;
 			sr->done_io += ret;
-			req->flags |= REQ_F_PARTIAL_IO;
+			req->flags |= REQ_F_BL_NO_RECYCLE;
 			return io_setup_async_msg(req, kmsg, issue_flags);
 		}
 		if (ret == -ERESTARTSYS)
@@ -535,7 +535,7 @@ int io_send(struct io_kiocb *req, unsigned int issue_flags)
 			sr->len -= ret;
 			sr->buf += ret;
 			sr->done_io += ret;
-			req->flags |= REQ_F_PARTIAL_IO;
+			req->flags |= REQ_F_BL_NO_RECYCLE;
 			return io_setup_async_addr(req, &__address, issue_flags);
 		}
 		if (ret == -ERESTARTSYS)
@@ -907,7 +907,7 @@ retry_multishot:
 		}
 		if (ret > 0 && io_net_retry(sock, flags)) {
 			sr->done_io += ret;
-			req->flags |= REQ_F_PARTIAL_IO;
+			req->flags |= REQ_F_BL_NO_RECYCLE;
 			return io_setup_async_msg(req, kmsg, issue_flags);
 		}
 		if (ret == -ERESTARTSYS)
@@ -1007,7 +1007,7 @@ retry_multishot:
 			sr->len -= ret;
 			sr->buf += ret;
 			sr->done_io += ret;
-			req->flags |= REQ_F_PARTIAL_IO;
+			req->flags |= REQ_F_BL_NO_RECYCLE;
 			return -EAGAIN;
 		}
 		if (ret == -ERESTARTSYS)
@@ -1250,7 +1250,7 @@ int io_send_zc(struct io_kiocb *req, unsigned int issue_flags)
 			zc->len -= ret;
 			zc->buf += ret;
 			zc->done_io += ret;
-			req->flags |= REQ_F_PARTIAL_IO;
+			req->flags |= REQ_F_BL_NO_RECYCLE;
 			return io_setup_async_addr(req, &__address, issue_flags);
 		}
 		if (ret == -ERESTARTSYS)
@@ -1320,7 +1320,7 @@ int io_sendmsg_zc(struct io_kiocb *req, unsigned int issue_flags)
 
 	if (ret > 0 && io_net_retry(sock, flags)) {
 		sr->done_io += ret;
-		req->flags |= REQ_F_PARTIAL_IO;
+		req->flags |= REQ_F_BL_NO_RECYCLE;
 		return io_setup_async_msg(req, kmsg, issue_flags);
 	}
 	if (ret == -ERESTARTSYS)
--- a/io_uring/rw.c
+++ b/io_uring/rw.c
@@ -275,7 +275,7 @@ static bool __io_complete_rw_common(struct io_kiocb *req, long res)
 		 * current cycle.
 		 */
 		io_req_io_end(req);
-		req->flags |= REQ_F_REISSUE | REQ_F_PARTIAL_IO;
+		req->flags |= REQ_F_REISSUE | REQ_F_BL_NO_RECYCLE;
 		return true;
 	}
 	req_set_fail(req);
@@ -342,7 +342,7 @@ static void io_complete_rw_iopoll(struct kiocb *kiocb, long res)
 		io_req_end_write(req);
 	if (unlikely(res != req->cqe.res)) {
 		if (res == -EAGAIN && io_rw_should_reissue(req)) {
-			req->flags |= REQ_F_REISSUE | REQ_F_PARTIAL_IO;
+			req->flags |= REQ_F_REISSUE | REQ_F_BL_NO_RECYCLE;
 			return;
 		}
 		req->cqe.res = res;