signal: Rename group_exit_task group_exec_task
The only remaining user of group_exit_task is exec. Rename the field so that it is clear which part of the code uses it. Update the comment above the definition of group_exec_task to document how it is currently used.

Link: https://lkml.kernel.org/r/20211213225350.27481-7-ebiederm@xmission.com
Signed-off-by: "Eric W. Biederman" <ebiederm@xmission.com>
parent 6ac79ec537
commit 60700e38fb
fs/exec.c
@@ -1054,7 +1054,7 @@ static int de_thread(struct task_struct *tsk)
                         return -EAGAIN;
         }
 
-        sig->group_exit_task = tsk;
+        sig->group_exec_task = tsk;
         sig->notify_count = zap_other_threads(tsk);
         if (!thread_group_leader(tsk))
                 sig->notify_count--;
@@ -1082,7 +1082,7 @@ static int de_thread(struct task_struct *tsk)
                         write_lock_irq(&tasklist_lock);
                         /*
                          * Do this under tasklist_lock to ensure that
-                         * exit_notify() can't miss ->group_exit_task
+                         * exit_notify() can't miss ->group_exec_task
                          */
                         sig->notify_count = -1;
                         if (likely(leader->exit_state))
@@ -1149,7 +1149,7 @@ static int de_thread(struct task_struct *tsk)
                 release_task(leader);
         }
 
-        sig->group_exit_task = NULL;
+        sig->group_exec_task = NULL;
         sig->notify_count = 0;
 
 no_thread_group:
@@ -1162,7 +1162,7 @@ no_thread_group:
 killed:
         /* protects against exit_notify() and __exit_signal() */
         read_lock(&tasklist_lock);
-        sig->group_exit_task = NULL;
+        sig->group_exec_task = NULL;
         sig->notify_count = 0;
         read_unlock(&tasklist_lock);
         return -EAGAIN;
include/linux/sched/signal.h
@@ -109,13 +109,9 @@ struct signal_struct {
 
         /* thread group exit support */
         int group_exit_code;
-        /* overloaded:
-         * - notify group_exit_task when ->count is equal to notify_count
-         * - everyone except group_exit_task is stopped during signal delivery
-         *   of fatal signals, group_exit_task processes the signal.
-         */
+        /* notify group_exec_task when notify_count is less or equal to 0 */
         int notify_count;
-        struct task_struct *group_exit_task;
+        struct task_struct *group_exec_task;
 
         /* thread group stop support, overloads group_exit_code too */
         int group_stop_count;
@@ -275,11 +271,11 @@ static inline void signal_set_stop_flags(struct signal_struct *sig,
         sig->flags = (sig->flags & ~SIGNAL_STOP_MASK) | flags;
 }
 
-/* If true, all threads except ->group_exit_task have pending SIGKILL */
+/* If true, all threads except ->group_exec_task have pending SIGKILL */
 static inline int signal_group_exit(const struct signal_struct *sig)
 {
         return (sig->flags & SIGNAL_GROUP_EXIT) ||
-                (sig->group_exit_task != NULL);
+                (sig->group_exec_task != NULL);
 }
 
 extern void flush_signals(struct task_struct *);
kernel/exit.c
@@ -116,7 +116,7 @@ static void __exit_signal(struct task_struct *tsk)
                  * then notify it:
                  */
                 if (sig->notify_count > 0 && !--sig->notify_count)
-                        wake_up_process(sig->group_exit_task);
+                        wake_up_process(sig->group_exec_task);
 
                 if (tsk == sig->curr_target)
                         sig->curr_target = next_thread(tsk);
@@ -697,7 +697,7 @@ static void exit_notify(struct task_struct *tsk, int group_dead)
 
         /* mt-exec, de_thread() is waiting for group leader */
         if (unlikely(tsk->signal->notify_count < 0))
-                wake_up_process(tsk->signal->group_exit_task);
+                wake_up_process(tsk->signal->group_exec_task);
         write_unlock_irq(&tasklist_lock);
 
         list_for_each_entry_safe(p, n, &dead, ptrace_entry) {
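For reference, the protocol the renamed field serves can be sketched in plain userspace C. The following is a hypothetical pthread illustration, not kernel code and not part of this commit: it models only the first waiting phase of de_thread(), where notify_count is set to the number of other threads and the exec'ing thread sleeps until each exiting thread decrements the count and wakes it (the role wake_up_process(sig->group_exec_task) plays in __exit_signal()). The second phase, where notify_count is set to -1 and exit_notify() wakes the waiter, is not modeled, and all names below (other_thread, exec_done, NTHREADS) are illustrative only.

/*
 * Userspace sketch (compile with: cc -pthread) of the notify_count
 * handshake between an exec'ing thread and its exiting siblings.
 */
#include <pthread.h>
#include <stdio.h>

#define NTHREADS 4

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t exec_done = PTHREAD_COND_INITIALIZER;
static int notify_count;        /* threads the exec'ing thread still waits for */

static void *other_thread(void *arg)    /* stands in for an exiting sibling */
{
        (void)arg;
        pthread_mutex_lock(&lock);
        /* mirrors __exit_signal(): the last thread out wakes the waiter,
         * i.e. wake_up_process(sig->group_exec_task) in the kernel */
        if (notify_count > 0 && !--notify_count)
                pthread_cond_signal(&exec_done);
        pthread_mutex_unlock(&lock);
        return NULL;
}

int main(void)                          /* plays the role of de_thread() */
{
        pthread_t tid[NTHREADS];

        notify_count = NTHREADS;        /* like sig->notify_count = zap_other_threads(tsk) */
        for (int i = 0; i < NTHREADS; i++)
                pthread_create(&tid[i], NULL, other_thread, NULL);

        pthread_mutex_lock(&lock);
        while (notify_count > 0)        /* de_thread() sleeping until the count drains */
                pthread_cond_wait(&exec_done, &lock);
        pthread_mutex_unlock(&lock);

        for (int i = 0; i < NTHREADS; i++)
                pthread_join(tid[i], NULL);
        printf("all other threads are gone; exec can continue\n");
        return 0;
}

Run as written, the program exits only after all four helper threads have decremented the count, which is the behaviour de_thread() relies on before it assumes the thread group leader's identity.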