mm, oom: remove redundant task_in_mem_cgroup() check
oom_unkillable_task() can be called from three different contexts, i.e. global OOM, memcg OOM, and the oom_score procfs interface. At the moment oom_unkillable_task() does a task_in_mem_cgroup() check on the given process. There is no reason to perform that check for global OOM or the oom_score procfs interface, so those contexts pass a NULL memcg and skip it. In the memcg OOM context, however, oom_unkillable_task() is always called from mem_cgroup_scan_tasks(), which only iterates tasks that already belong to the target memcg hierarchy, so the task_in_mem_cgroup() check is redundant and effectively dead code. Just remove the task_in_mem_cgroup() check altogether.

Link: http://lkml.kernel.org/r/20190624212631.87212-2-shakeelb@google.com
Signed-off-by: Shakeel Butt <shakeelb@google.com>
Signed-off-by: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
Acked-by: Roman Gushchin <guro@fb.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Nick Piggin <npiggin@suse.de>
Cc: Paul Jackson <pj@sgi.com>
Cc: Vladimir Davydov <vdavydov.dev@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 5eee7e1cdb
commit 6ba749ee78
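Why the check was dead code: the memcg OOM path reaches oom_unkillable_task() only through mem_cgroup_scan_tasks(), whose css task iterator never yields a task outside the memcg hierarchy being walked. Below is a condensed sketch of that walk, paraphrased from the v5.2-era mm/memcontrol.c; it is not the verbatim kernel source, and the comments are ours.

/*
 * Condensed sketch of mem_cgroup_scan_tasks(): call fn() on every task
 * charged to @memcg or to any of its descendants.  Every task handed to
 * fn() is by construction a member of the hierarchy, so re-checking
 * task_in_mem_cgroup() inside fn() could never fail on this path.
 */
static int mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
				 int (*fn)(struct task_struct *, void *),
				 void *arg)
{
	struct mem_cgroup *iter;
	int ret = 0;

	/* Pre-order walk of @memcg and all of its descendants. */
	for_each_mem_cgroup_tree(iter, memcg) {
		struct css_task_iter it;
		struct task_struct *task;

		/* Iterate only the tasks attached to this css. */
		css_task_iter_start(&iter->css, 0, &it);
		while (!ret && (task = css_task_iter_next(&it)))
			ret = fn(task, arg);	/* e.g. oom_evaluate_task() */
		css_task_iter_end(&it);
		if (ret) {
			/* Drop the iterator's reference before bailing out. */
			mem_cgroup_iter_break(memcg, iter);
			break;
		}
	}
	return ret;
}

Since the callback can only ever see members of the hierarchy, the memcg argument threaded through oom_unkillable_task() and oom_badness() carried no information, which is exactly what the diff below removes.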
fs/proc/base.c
@@ -532,7 +532,7 @@ static int proc_oom_score(struct seq_file *m, struct pid_namespace *ns,
 	unsigned long totalpages = totalram_pages() + total_swap_pages;
 	unsigned long points = 0;
 
-	points = oom_badness(task, NULL, NULL, totalpages) *
+	points = oom_badness(task, NULL, totalpages) *
 			1000 / totalpages;
 	seq_printf(m, "%lu\n", points);
 	return 0;
include/linux/memcontrol.h
@@ -394,7 +394,6 @@ out:
 
 struct lruvec *mem_cgroup_page_lruvec(struct page *, struct pglist_data *);
 
-bool task_in_mem_cgroup(struct task_struct *task, struct mem_cgroup *memcg);
 struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);
 
 struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm);
@@ -875,12 +874,6 @@ static inline bool mm_match_cgroup(struct mm_struct *mm,
 	return true;
 }
 
-static inline bool task_in_mem_cgroup(struct task_struct *task,
-				      const struct mem_cgroup *memcg)
-{
-	return true;
-}
-
 static inline struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
 {
 	return NULL;
include/linux/oom.h
@@ -108,7 +108,7 @@ static inline vm_fault_t check_stable_address_space(struct mm_struct *mm)
 bool __oom_reap_task_mm(struct mm_struct *mm);
 
 extern unsigned long oom_badness(struct task_struct *p,
-		struct mem_cgroup *memcg, const nodemask_t *nodemask,
+		const nodemask_t *nodemask,
 		unsigned long totalpages);
 
 extern bool out_of_memory(struct oom_control *oc);
mm/memcontrol.c
@@ -1259,32 +1259,6 @@ void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
 	*lru_size += nr_pages;
 }
 
-bool task_in_mem_cgroup(struct task_struct *task, struct mem_cgroup *memcg)
-{
-	struct mem_cgroup *task_memcg;
-	struct task_struct *p;
-	bool ret;
-
-	p = find_lock_task_mm(task);
-	if (p) {
-		task_memcg = get_mem_cgroup_from_mm(p->mm);
-		task_unlock(p);
-	} else {
-		/*
-		 * All threads may have already detached their mm's, but the oom
-		 * killer still needs to detect if they have already been oom
-		 * killed to prevent needlessly killing additional tasks.
-		 */
-		rcu_read_lock();
-		task_memcg = mem_cgroup_from_task(task);
-		css_get(&task_memcg->css);
-		rcu_read_unlock();
-	}
-	ret = mem_cgroup_is_descendant(task_memcg, memcg);
-	css_put(&task_memcg->css);
-	return ret;
-}
-
 /**
  * mem_cgroup_margin - calculate chargeable space of a memory cgroup
  * @memcg: the memory cgroup
mm/oom_kill.c
@@ -153,17 +153,13 @@ static inline bool is_memcg_oom(struct oom_control *oc)
 
 /* return true if the task is not adequate as candidate victim task. */
 static bool oom_unkillable_task(struct task_struct *p,
-		struct mem_cgroup *memcg, const nodemask_t *nodemask)
+		const nodemask_t *nodemask)
 {
 	if (is_global_init(p))
 		return true;
 	if (p->flags & PF_KTHREAD)
 		return true;
 
-	/* When mem_cgroup_out_of_memory() and p is not member of the group */
-	if (memcg && !task_in_mem_cgroup(p, memcg))
-		return true;
-
 	/* p may not have freeable memory in nodemask */
 	if (!has_intersects_mems_allowed(p, nodemask))
 		return true;
@@ -194,20 +190,19 @@ static bool is_dump_unreclaim_slabs(void)
  * oom_badness - heuristic function to determine which candidate task to kill
  * @p: task struct of which task we should calculate
  * @totalpages: total present RAM allowed for page allocation
- * @memcg: task's memory controller, if constrained
 * @nodemask: nodemask passed to page allocator for mempolicy ooms
 *
 * The heuristic for determining which task to kill is made to be as simple and
 * predictable as possible.  The goal is to return the highest value for the
 * task consuming the most memory to avoid subsequent oom failures.
 */
-unsigned long oom_badness(struct task_struct *p, struct mem_cgroup *memcg,
+unsigned long oom_badness(struct task_struct *p,
 			  const nodemask_t *nodemask, unsigned long totalpages)
 {
 	long points;
 	long adj;
 
-	if (oom_unkillable_task(p, memcg, nodemask))
+	if (oom_unkillable_task(p, nodemask))
 		return 0;
 
 	p = find_lock_task_mm(p);
@@ -318,7 +313,7 @@ static int oom_evaluate_task(struct task_struct *task, void *arg)
 	struct oom_control *oc = arg;
 	unsigned long points;
 
-	if (oom_unkillable_task(task, NULL, oc->nodemask))
+	if (oom_unkillable_task(task, oc->nodemask))
 		goto next;
 
 	/*
@@ -342,7 +337,7 @@ static int oom_evaluate_task(struct task_struct *task, void *arg)
 		goto select;
 	}
 
-	points = oom_badness(task, NULL, oc->nodemask, oc->totalpages);
+	points = oom_badness(task, oc->nodemask, oc->totalpages);
 	if (!points || points < oc->chosen_points)
 		goto next;
 
@@ -387,7 +382,7 @@ static int dump_task(struct task_struct *p, void *arg)
 	struct oom_control *oc = arg;
 	struct task_struct *task;
 
-	if (oom_unkillable_task(p, NULL, oc->nodemask))
+	if (oom_unkillable_task(p, oc->nodemask))
 		return 0;
 
 	task = find_lock_task_mm(p);
@@ -1084,7 +1079,7 @@ bool out_of_memory(struct oom_control *oc)
 	check_panic_on_oom(oc);
 
 	if (!is_memcg_oom(oc) && sysctl_oom_kill_allocating_task &&
-	    current->mm && !oom_unkillable_task(current, NULL, oc->nodemask) &&
+	    current->mm && !oom_unkillable_task(current, oc->nodemask) &&
 	    current->signal->oom_score_adj != OOM_SCORE_ADJ_MIN) {
 		get_task_struct(current);
 		oc->chosen = current;