mm, memcg: don't take task_lock in task_in_mem_cgroup
For processes that have detached their mm's, task_in_mem_cgroup()
unnecessarily takes task_lock() when rcu_read_lock() is all that is
necessary to call mem_cgroup_from_task().

While we're here, switch task_in_mem_cgroup() to return bool.

Signed-off-by: David Rientjes <rientjes@google.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Michal Hocko <mhocko@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit ffbdccf5e1
parent 541c237c09
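At its core, the patch narrows the critical section used to pin the task's
memcg before the OOM-killer membership check. A minimal before/after sketch of
that pattern (not a standalone program; it assumes the kernel helpers
task_lock()/task_unlock(), rcu_read_lock()/rcu_read_unlock(),
mem_cgroup_from_task() and css_get() that appear in the hunks below):

	/* Before: task_lock() was held only while reading the task's memcg. */
	task_lock(task);
	curr = mem_cgroup_from_task(task);
	if (curr)
		css_get(&curr->css);	/* pin the memcg before dropping the lock */
	task_unlock(task);

	/*
	 * After: an RCU read-side critical section is enough, since
	 * mem_cgroup_from_task() only dereferences RCU-protected cgroup
	 * state and css_get() pins the memcg before the section ends.
	 */
	rcu_read_lock();
	curr = mem_cgroup_from_task(task);
	if (curr)
		css_get(&curr->css);
	rcu_read_unlock();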
@@ -77,7 +77,8 @@ extern void mem_cgroup_uncharge_cache_page(struct page *page);
 
 bool __mem_cgroup_same_or_subtree(const struct mem_cgroup *root_memcg,
 				  struct mem_cgroup *memcg);
-int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *memcg);
+bool task_in_mem_cgroup(struct task_struct *task,
+			const struct mem_cgroup *memcg);
 
 extern struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page);
 extern struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);
@@ -273,10 +274,10 @@ static inline bool mm_match_cgroup(struct mm_struct *mm,
 	return true;
 }
 
-static inline int task_in_mem_cgroup(struct task_struct *task,
-				     const struct mem_cgroup *memcg)
+static inline bool task_in_mem_cgroup(struct task_struct *task,
+				      const struct mem_cgroup *memcg)
 {
-	return 1;
+	return true;
 }
 
 static inline struct cgroup_subsys_state
@@ -1448,11 +1448,12 @@ static bool mem_cgroup_same_or_subtree(const struct mem_cgroup *root_memcg,
 	return ret;
 }
 
-int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *memcg)
+bool task_in_mem_cgroup(struct task_struct *task,
+			const struct mem_cgroup *memcg)
 {
-	int ret;
 	struct mem_cgroup *curr = NULL;
 	struct task_struct *p;
+	bool ret;
 
 	p = find_lock_task_mm(task);
 	if (p) {
@@ -1464,14 +1465,14 @@ int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *memcg)
 		 * killer still needs to detect if they have already been oom
 		 * killed to prevent needlessly killing additional tasks.
 		 */
-		task_lock(task);
+		rcu_read_lock();
 		curr = mem_cgroup_from_task(task);
 		if (curr)
 			css_get(&curr->css);
-		task_unlock(task);
+		rcu_read_unlock();
 	}
 	if (!curr)
-		return 0;
+		return false;
 	/*
 	 * We should check use_hierarchy of "memcg" not "curr". Because checking
 	 * use_hierarchy of "curr" here make this function true if hierarchy is
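For context, a hedged sketch of how a caller can consume the new bool return;
the wrapper name task_outside_memcg() is hypothetical and only illustrates the
kind of memcg-membership check the OOM killer performs:

	/*
	 * Hypothetical helper: true if the task should be skipped because it
	 * does not belong to the memcg currently being OOM-killed.
	 */
	static bool task_outside_memcg(struct task_struct *p,
				       const struct mem_cgroup *memcg)
	{
		return memcg && !task_in_mem_cgroup(p, memcg);
	}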