task_work: Introduce task_work_pending

Wrap the test of task->task_works in a helper function to make
it clear what is being tested.

All of the other readers of task->task_works use READ_ONCE, and this is
necessary even on current, as other processes can update
task->task_works.  So for consistency I have added READ_ONCE into
task_work_pending.

Reviewed-by: Kees Cook <keescook@chromium.org>
Link: https://lkml.kernel.org/r/20220309162454.123006-7-ebiederm@xmission.com
Signed-off-by: "Eric W. Biederman" <ebiederm@xmission.com>
Eric W. Biederman 2022-02-09 08:52:41 -06:00
parent 8ca07e17c9
commit 7f62d40d9c
5 changed files with 13 additions and 8 deletions
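For readers less familiar with the task_work API, here is a minimal, hypothetical sketch (not part of this patch) of how a caller might queue work and then use the new task_work_pending() helper; my_flush_work() and my_queue_cleanup() are made-up names used only for illustration.

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/task_work.h>

/* Hypothetical callback; it runs in the context of the task it was queued on. */
static void my_flush_work(struct callback_head *head)
{
}

/* Hypothetical caller showing the pattern this patch converts callers to. */
static int my_queue_cleanup(struct task_struct *task, struct callback_head *work)
{
	init_task_work(work, my_flush_work);

	/*
	 * task_work_add() links the callback onto task->task_works with
	 * cmpxchg(), so other tasks may be updating the list concurrently.
	 */
	if (task_work_add(task, work, TWA_RESUME))
		return -ESRCH;	/* the task is already exiting */

	/*
	 * task_work_pending() wraps READ_ONCE(task->task_works), so the
	 * check below is a fresh read rather than a stale, cached value.
	 */
	if (task == current && task_work_pending(current))
		task_work_run();

	return 0;
}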

fs/io_uring.c

@@ -2590,7 +2590,7 @@ static inline unsigned int io_sqring_entries(struct io_ring_ctx *ctx)
 static inline bool io_run_task_work(void)
 {
-	if (test_thread_flag(TIF_NOTIFY_SIGNAL) || current->task_works) {
+	if (test_thread_flag(TIF_NOTIFY_SIGNAL) || task_work_pending(current)) {
 		__set_current_state(TASK_RUNNING);
 		tracehook_notify_signal();
 		return true;
@@ -7602,7 +7602,7 @@ static int io_sq_thread(void *data)
 		}
 		prepare_to_wait(&sqd->wait, &wait, TASK_INTERRUPTIBLE);
-		if (!io_sqd_events_pending(sqd) && !current->task_works) {
+		if (!io_sqd_events_pending(sqd) && !task_work_pending(current)) {
 			bool needs_sched = true;
 			list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) {
@@ -10321,7 +10321,7 @@ static __cold void __io_uring_show_fdinfo(struct io_ring_ctx *ctx,
 		hlist_for_each_entry(req, list, hash_node)
 			seq_printf(m, "  op=%d, task_works=%d\n", req->opcode,
-					req->task->task_works != NULL);
+					task_work_pending(req->task));
 	}
 	seq_puts(m, "CqOverflowList:\n");

include/linux/task_work.h

@@ -19,6 +19,11 @@ enum task_work_notify_mode {
 	TWA_SIGNAL,
 };
+static inline bool task_work_pending(struct task_struct *task)
+{
+	return READ_ONCE(task->task_works);
+}
+
 int task_work_add(struct task_struct *task, struct callback_head *twork,
 			enum task_work_notify_mode mode);

include/linux/tracehook.h

@@ -90,7 +90,7 @@ static inline void tracehook_notify_resume(struct pt_regs *regs)
 	 *			hlist_add_head(task->task_works);
 	 */
 	smp_mb__after_atomic();
-	if (unlikely(current->task_works))
+	if (unlikely(task_work_pending(current)))
 		task_work_run();
 #ifdef CONFIG_KEYS_REQUEST_CACHE
@@ -115,7 +115,7 @@ static inline void tracehook_notify_signal(void)
 {
 	clear_thread_flag(TIF_NOTIFY_SIGNAL);
 	smp_mb__after_atomic();
-	if (current->task_works)
+	if (task_work_pending(current))
 		task_work_run();
 }

kernel/signal.c

@@ -2344,7 +2344,7 @@ static void ptrace_do_notify(int signr, int exit_code, int why)
 void ptrace_notify(int exit_code)
 {
 	BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
-	if (unlikely(current->task_works))
+	if (unlikely(task_work_pending(current)))
 		task_work_run();
 	spin_lock_irq(&current->sighand->siglock);
@@ -2626,7 +2626,7 @@ bool get_signal(struct ksignal *ksig)
 	struct signal_struct *signal = current->signal;
 	int signr;
-	if (unlikely(current->task_works))
+	if (unlikely(task_work_pending(current)))
 		task_work_run();
 	/*

kernel/task_work.c

@@ -78,7 +78,7 @@ task_work_cancel_match(struct task_struct *task,
 	struct callback_head *work;
 	unsigned long flags;
-	if (likely(!task->task_works))
+	if (likely(!task_work_pending(task)))
 		return NULL;
 	/*
 	 * If cmpxchg() fails we continue without updating pprev.