mm,oom: speed up select_bad_process() loop
Since commit 3a5dda7a17 ("oom: prevent unnecessary oom kills or kernel panics"), select_bad_process() is using for_each_process_thread().
Since oom_unkillable_task() scans all threads in the caller's thread
group and oom_task_origin() scans the signal_struct of the caller's thread
group, we don't need to call oom_unkillable_task() and oom_task_origin()
on each thread. Also, since the !mm test will be done later in
oom_badness(), we don't need to do the !mm test on each thread. Therefore,
we only need to do the TIF_MEMDIE test on each thread.
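For illustration, here is a simplified sketch (not the exact upstream
source) of what the pre-patch scan effectively did for every thread, with
the group-wide versus per-thread nature of each check noted; oc and chosen
stand for the oom_control and current candidate of the surrounding loop:

struct task_struct *g, *p;

for_each_process_thread(g, p) {
	/* Group-wide: oom_unkillable_task() looks at the whole thread group. */
	if (oom_unkillable_task(p, NULL, oc->nodemask))
		continue;
	/* Per-thread: TIF_MEMDIE is the only genuinely per-thread test. */
	if (test_tsk_thread_flag(p, TIF_MEMDIE) && !is_sysrq_oom(oc))
		break;			/* a victim is already exiting, abort */
	/* Redundant per thread: !mm is re-tested later in oom_badness(). */
	if (!p->mm)
		continue;
	/* Group-wide: oom_task_origin() only reads p->signal. */
	if (oom_task_origin(p)) {
		chosen = p;		/* select this task outright */
		break;
	}
	/* ... oom_badness() scoring and best-candidate tracking ... */
}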
Although the original code was correct, it was quite inefficient because
each thread group was scanned num_threads times, which can be a lot,
especially for processes with many threads. Even though the OOM killer is
an extremely cold path, it is always good to be as efficient as possible
while inside rcu_read_lock(), i.e. an unpreemptible context.
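To put rough numbers on it (hypothetical workload, plain userspace C
rather than kernel code), the difference in how many tasks the two loops
visit can be sketched like this:

#include <stdio.h>

int main(void)
{
	/* Hypothetical workload: 500 thread groups, 200 threads each. */
	const unsigned long groups = 500, threads = 200;
	unsigned long old_visits = 0, new_visits = 0;

	for (unsigned long g = 0; g < groups; g++) {
		new_visits++;			/* for_each_process() */
		for (unsigned long t = 0; t < threads; t++)
			old_visits++;		/* for_each_process_thread() */
	}

	/* Prints: old loop: 100000 task visits, new loop: 500 */
	printf("old loop: %lu task visits, new loop: %lu\n",
	       old_visits, new_visits);
	return 0;
}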
If we track the number of TIF_MEMDIE threads inside signal_struct, we don't
need to do the TIF_MEMDIE test on each thread. This allows
select_bad_process() to use for_each_process().
This patch adds a counter to signal_struct for tracking how many
TIF_MEMDIE threads are in a given thread group, and checks it in
oom_scan_process_thread() so that select_bad_process() can use
for_each_process() rather than for_each_process_thread().
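Condensed view of the counter's lifecycle after this patch; this
paraphrases the hunks below rather than quoting them verbatim, with
unrelated lines elided:

/* include/linux/sched.h */
struct signal_struct {
	/* ... */
	atomic_t oom_victims;	/* # of TIF_MEMDIE threads in this thread group */
	/* ... */
};

/* mm/oom_kill.c: the counter simply follows the TIF_MEMDIE flag */
void mark_oom_victim(struct task_struct *tsk)
{
	if (test_and_set_tsk_thread_flag(tsk, TIF_MEMDIE))
		return;
	atomic_inc(&tsk->signal->oom_victims);
	/* ... */
}

void exit_oom_victim(struct task_struct *tsk)
{
	if (!test_and_clear_tsk_thread_flag(tsk, TIF_MEMDIE))
		return;
	atomic_dec(&tsk->signal->oom_victims);
	/* ... */
}

/* mm/oom_kill.c, oom_scan_process_thread(): one read per thread group */
if (!is_sysrq_oom(oc) && atomic_read(&task->signal->oom_victims))
	return OOM_SCAN_ABORT;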
[mhocko@suse.com: do not blow the signal_struct size]
Link: http://lkml.kernel.org/r/20160520075035.GF19172@dhcp22.suse.cz
Link: http://lkml.kernel.org/r/201605182230.IDC73435.MVSOHLFOQFOJtF@I-love.SAKURA.ne.jp
Signed-off-by: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
Acked-by: Michal Hocko <mhocko@suse.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Oleg Nesterov <oleg@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit f44666b046
parent 98748bd722
include/linux/sched.h
@@ -669,6 +669,7 @@ struct signal_struct {
 	atomic_t		sigcnt;
 	atomic_t		live;
 	int			nr_threads;
+	atomic_t oom_victims; /* # of TIF_MEMDIE threads in this thread group */
 	struct list_head	thread_head;
 
 	wait_queue_head_t	wait_chldexit;	/* for wait4() */
mm/oom_kill.c
@@ -283,12 +283,8 @@ enum oom_scan_t oom_scan_process_thread(struct oom_control *oc,
 	 * This task already has access to memory reserves and is being killed.
 	 * Don't allow any other task to have access to the reserves.
 	 */
-	if (test_tsk_thread_flag(task, TIF_MEMDIE)) {
-		if (!is_sysrq_oom(oc))
-			return OOM_SCAN_ABORT;
-	}
-	if (!task->mm)
-		return OOM_SCAN_CONTINUE;
+	if (!is_sysrq_oom(oc) && atomic_read(&task->signal->oom_victims))
+		return OOM_SCAN_ABORT;
 
 	/*
 	 * If task is allocating a lot of memory and has been marked to be
@@ -307,12 +303,12 @@ enum oom_scan_t oom_scan_process_thread(struct oom_control *oc,
 static struct task_struct *select_bad_process(struct oom_control *oc,
 		unsigned int *ppoints, unsigned long totalpages)
 {
-	struct task_struct *g, *p;
+	struct task_struct *p;
 	struct task_struct *chosen = NULL;
 	unsigned long chosen_points = 0;
 
 	rcu_read_lock();
-	for_each_process_thread(g, p) {
+	for_each_process(p) {
 		unsigned int points;
 
 		switch (oom_scan_process_thread(oc, p, totalpages)) {
@@ -331,9 +327,6 @@ static struct task_struct *select_bad_process(struct oom_control *oc,
 		points = oom_badness(p, NULL, oc->nodemask, totalpages);
 		if (!points || points < chosen_points)
 			continue;
-		/* Prefer thread group leaders for display purposes */
-		if (points == chosen_points && thread_group_leader(chosen))
-			continue;
 
 		chosen = p;
 		chosen_points = points;
@@ -673,6 +666,7 @@ void mark_oom_victim(struct task_struct *tsk)
 	/* OOM killer might race with memcg OOM */
 	if (test_and_set_tsk_thread_flag(tsk, TIF_MEMDIE))
 		return;
+	atomic_inc(&tsk->signal->oom_victims);
 	/*
 	 * Make sure that the task is woken up from uninterruptible sleep
 	 * if it is frozen because OOM killer wouldn't be able to free
@@ -690,6 +684,7 @@ void exit_oom_victim(struct task_struct *tsk)
 {
 	if (!test_and_clear_tsk_thread_flag(tsk, TIF_MEMDIE))
 		return;
+	atomic_dec(&tsk->signal->oom_victims);
 
 	if (!atomic_dec_return(&oom_victims))
 		wake_up_all(&oom_victims_wait);