io_uring-5.12-2021-03-21
-----BEGIN PGP SIGNATURE-----

iQJEBAABCAAuFiEEwPw5LcreJtl1+l5K99NY+ylx4KYFAmBXahgQHGF4Ym9lQGtl
cm5lbC5kawAKCRD301j7KXHgppMVEAC+Kn8AmNPbV7/AX3jfZYEh1UwyPetpJQ2m
FiWkXnuG85kM3UD12S5RYEYkHxzSob2d1yfZ+kL1TAkVJaz3FVoUU9ms0guXfCNb
l8k5fgK2zlegCyBIsPnouR/zV4Y/GJjf+tY0/c1e2Ovfl1zjCW486PvwjJzjMy8b
rXUi3MMKB3JPltML152qi9S1lJJuIHMB22ZUdTiyX+u4RtCzvGHGZmlpb4sw73RF
IRN7qBDYy5Pth+PCUBrhveIPmF/QSKhPHTarczIkgqSw/fSslsgEdBe88fxBDfbf
+WIaYifwqDongT4wkboXFUPTkSUlA+TbvnMW6dRZJTJvRspKz0SV4l+xC/QvT231
JqHqvRk2FkdVlpfXBvdVz94jLFiBJSl02QqTseQGbRdFY4BvxqkC15z4HkPdldJ8
QM2+6ZfzVWbzZkssgK42kTuDq9EX5Ks/+rOkIM/z2L5D00sbeeCVGCeNXf3uS7So
s7pskeTOLoXSvTpwzzEBEpJ6ebU698B1hx++Hjuy95Zifs2holkHXu36wvYmWFDm
CmxZ48waSQJq/emjbOSYfJthKc/TmaUzocsnMvSA5eoCmP445OUQJJTfifEj50if
/k0+XTi1DOrYHyy8R7a8T7xXDJIlMGY7fZyvmzopfRlJHnaHkeBfpbSaPCZXoAiJ
8T/mkYohAw==
=xaEf
-----END PGP SIGNATURE-----

Merge tag 'io_uring-5.12-2021-03-21' of git://git.kernel.dk/linux-block

Pull io_uring follow-up fixes from Jens Axboe:

 - The SIGSTOP change from Eric, so we properly ignore that for
   PF_IO_WORKER threads.

 - Disallow sending signals to PF_IO_WORKER threads in general; we're
   not interested in having them funnel back to the io_uring owning
   task.

 - Stable fix from Stefan, ensuring we properly break links for short
   send/sendmsg and recv/recvmsg if MSG_WAITALL is set.

 - Catch and loop when needing to run task_work before a PF_IO_WORKER
   thread goes to sleep.

* tag 'io_uring-5.12-2021-03-21' of git://git.kernel.dk/linux-block:
  io_uring: call req_set_fail_links() on short send[msg]()/recv[msg]() with MSG_WAITALL
  io-wq: ensure task is running before processing task_work
  signal: don't allow STOP on PF_IO_WORKER threads
  signal: don't allow sending any signals to PF_IO_WORKER threads
commit 2c41fab1c6
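The MSG_WAITALL fix in the list above changes what counts as success for linked send/recv requests. As a rough userspace illustration (not part of this series, and assuming liburing; the helper name, socket fd, and buffer are hypothetical), a short recv in a link like the one below now fails the link, so the dependent send completes with -ECANCELED instead of running against a partially filled buffer:

/*
 * Minimal sketch (not from this patch series) of the linked-SQE
 * pattern affected by the MSG_WAITALL fix.  Assumes liburing; the
 * helper name, socket fd and buffer are illustrative only.
 */
#include <liburing.h>
#include <sys/socket.h>
#include <errno.h>

/* Queue a full-length recv and a dependent send as one link. */
static int queue_linked_recv_send(struct io_uring *ring, int sockfd,
				  void *buf, size_t len)
{
	struct io_uring_sqe *sqe;

	/* recv the whole buffer; a short transfer should fail the link */
	sqe = io_uring_get_sqe(ring);
	if (!sqe)
		return -EBUSY;	/* SQ ring full in this sketch */
	io_uring_prep_recv(sqe, sockfd, buf, len, MSG_WAITALL);
	sqe->flags |= IOSQE_IO_LINK;	/* next SQE depends on this one */

	/* dependent send; must not run if the recv came up short */
	sqe = io_uring_get_sqe(ring);
	if (!sqe)
		return -EBUSY;
	io_uring_prep_send(sqe, sockfd, buf, len, 0);

	return io_uring_submit(ring);
}

Before the change, any non-negative recv result kept the link alive even if fewer than len bytes were transferred; with req_set_fail_links() now called on short MSG_WAITALL transfers, the dependent send is cancelled instead.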
fs/io-wq.c
@@ -386,13 +386,16 @@ static struct io_wq_work *io_get_next_work(struct io_wqe *wqe)
 	return NULL;
 }
 
-static void io_flush_signals(void)
+static bool io_flush_signals(void)
 {
 	if (unlikely(test_tsk_thread_flag(current, TIF_NOTIFY_SIGNAL))) {
+		__set_current_state(TASK_RUNNING);
 		if (current->task_works)
 			task_work_run();
 		clear_tsk_thread_flag(current, TIF_NOTIFY_SIGNAL);
+		return true;
 	}
+	return false;
 }
 
 static void io_assign_current_work(struct io_worker *worker,
@@ -499,7 +502,8 @@ loop:
 		}
 		__io_worker_idle(wqe, worker);
 		raw_spin_unlock_irq(&wqe->lock);
-		io_flush_signals();
+		if (io_flush_signals())
+			continue;
 		ret = schedule_timeout(WORKER_IDLE_TIMEOUT);
 		if (try_to_freeze() || ret)
 			continue;
fs/io_uring.c
@@ -4386,6 +4386,7 @@ static int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
 	struct io_async_msghdr iomsg, *kmsg;
 	struct socket *sock;
 	unsigned flags;
+	int min_ret = 0;
 	int ret;
 
 	sock = sock_from_file(req->file);
@@ -4406,6 +4407,9 @@ static int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
 	else if (issue_flags & IO_URING_F_NONBLOCK)
 		flags |= MSG_DONTWAIT;
 
+	if (flags & MSG_WAITALL)
+		min_ret = iov_iter_count(&kmsg->msg.msg_iter);
+
 	ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);
 	if ((issue_flags & IO_URING_F_NONBLOCK) && ret == -EAGAIN)
 		return io_setup_async_msg(req, kmsg);
@@ -4416,7 +4420,7 @@ static int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
 	if (kmsg->free_iov)
 		kfree(kmsg->free_iov);
 	req->flags &= ~REQ_F_NEED_CLEANUP;
-	if (ret < 0)
+	if (ret < min_ret)
 		req_set_fail_links(req);
 	__io_req_complete(req, issue_flags, ret, 0);
 	return 0;
@@ -4429,6 +4433,7 @@ static int io_send(struct io_kiocb *req, unsigned int issue_flags)
 	struct iovec iov;
 	struct socket *sock;
 	unsigned flags;
+	int min_ret = 0;
 	int ret;
 
 	sock = sock_from_file(req->file);
@@ -4450,6 +4455,9 @@ static int io_send(struct io_kiocb *req, unsigned int issue_flags)
 	else if (issue_flags & IO_URING_F_NONBLOCK)
 		flags |= MSG_DONTWAIT;
 
+	if (flags & MSG_WAITALL)
+		min_ret = iov_iter_count(&msg.msg_iter);
+
 	msg.msg_flags = flags;
 	ret = sock_sendmsg(sock, &msg);
 	if ((issue_flags & IO_URING_F_NONBLOCK) && ret == -EAGAIN)
@@ -4457,7 +4465,7 @@ static int io_send(struct io_kiocb *req, unsigned int issue_flags)
 	if (ret == -ERESTARTSYS)
 		ret = -EINTR;
 
-	if (ret < 0)
+	if (ret < min_ret)
 		req_set_fail_links(req);
 	__io_req_complete(req, issue_flags, ret, 0);
 	return 0;
@@ -4609,6 +4617,7 @@ static int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
 	struct socket *sock;
 	struct io_buffer *kbuf;
 	unsigned flags;
+	int min_ret = 0;
 	int ret, cflags = 0;
 	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
 
@@ -4640,6 +4649,9 @@ static int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
 	else if (force_nonblock)
 		flags |= MSG_DONTWAIT;
 
+	if (flags & MSG_WAITALL)
+		min_ret = iov_iter_count(&kmsg->msg.msg_iter);
+
 	ret = __sys_recvmsg_sock(sock, &kmsg->msg, req->sr_msg.umsg,
 					kmsg->uaddr, flags);
 	if (force_nonblock && ret == -EAGAIN)
@@ -4653,7 +4665,7 @@ static int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
 	if (kmsg->free_iov)
 		kfree(kmsg->free_iov);
 	req->flags &= ~REQ_F_NEED_CLEANUP;
-	if (ret < 0)
+	if (ret < min_ret || ((flags & MSG_WAITALL) && (kmsg->msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))))
 		req_set_fail_links(req);
 	__io_req_complete(req, issue_flags, ret, cflags);
 	return 0;
@@ -4668,6 +4680,7 @@ static int io_recv(struct io_kiocb *req, unsigned int issue_flags)
 	struct socket *sock;
 	struct iovec iov;
 	unsigned flags;
+	int min_ret = 0;
 	int ret, cflags = 0;
 	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
 
@@ -4699,6 +4712,9 @@ static int io_recv(struct io_kiocb *req, unsigned int issue_flags)
 	else if (force_nonblock)
 		flags |= MSG_DONTWAIT;
 
+	if (flags & MSG_WAITALL)
+		min_ret = iov_iter_count(&msg.msg_iter);
+
 	ret = sock_recvmsg(sock, &msg, flags);
 	if (force_nonblock && ret == -EAGAIN)
 		return -EAGAIN;
@@ -4707,7 +4723,7 @@ static int io_recv(struct io_kiocb *req, unsigned int issue_flags)
 out_free:
 	if (req->flags & REQ_F_BUFFER_SELECTED)
 		cflags = io_put_recv_kbuf(req);
-	if (ret < 0)
+	if (ret < min_ret || ((flags & MSG_WAITALL) && (msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))))
 		req_set_fail_links(req);
 	__io_req_complete(req, issue_flags, ret, cflags);
 	return 0;
kernel/signal.c
@@ -288,7 +288,8 @@ bool task_set_jobctl_pending(struct task_struct *task, unsigned long mask)
 			JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING));
 	BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK));
 
-	if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING)))
+	if (unlikely(fatal_signal_pending(task) ||
+		     (task->flags & (PF_EXITING | PF_IO_WORKER))))
 		return false;
 
 	if (mask & JOBCTL_STOP_SIGMASK)
@@ -833,6 +834,9 @@ static int check_kill_permission(int sig, struct kernel_siginfo *info,
 
 	if (!valid_signal(sig))
 		return -EINVAL;
+	/* PF_IO_WORKER threads don't take any signals */
+	if (t->flags & PF_IO_WORKER)
+		return -ESRCH;
 
 	if (!si_fromuser(info))
 		return 0;