2
0
mirror of https://github.com/edk2-porting/linux-next.git synced 2024-12-17 09:43:59 +08:00

mempolicy: change get_task_policy() to return default_policy rather than NULL

Every caller of get_task_policy() falls back to default_policy if it
returns NULL. Change get_task_policy() to do this.

Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: David Rientjes <rientjes@google.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Alexander Viro <viro@zeniv.linux.org.uk>
Cc: Cyrill Gorcunov <gorcunov@openvz.org>
Cc: "Eric W. Biederman" <ebiederm@xmission.com>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Hugh Dickins <hughd@google.com>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
Oleg Nesterov 2014-10-09 15:27:43 -07:00 committed by Linus Torvalds
parent 2386740d1a
commit f15ca78e33

View File

@@ -126,22 +126,20 @@ static struct mempolicy preferred_node_policy[MAX_NUMNODES];
 static struct mempolicy *get_task_policy(struct task_struct *p)
 {
 	struct mempolicy *pol = p->mempolicy;
+	int node;
 
-	if (!pol) {
-		int node = numa_node_id();
+	if (pol)
+		return pol;
 
-		if (node != NUMA_NO_NODE) {
-			pol = &preferred_node_policy[node];
-			/*
-			 * preferred_node_policy is not initialised early in
-			 * boot
-			 */
-			if (!pol->mode)
-				pol = NULL;
-		}
+	node = numa_node_id();
+	if (node != NUMA_NO_NODE) {
+		pol = &preferred_node_policy[node];
+		/* preferred_node_policy is not initialised early in boot */
+		if (pol->mode)
+			return pol;
 	}
 
-	return pol;
+	return &default_policy;
 }
static const struct mempolicy_operations { static const struct mempolicy_operations {
@@ -1644,14 +1642,14 @@ struct mempolicy *get_vma_policy(struct task_struct *task,
 			mpol_get(pol);
 		}
 	}
-	if (!pol)
-		pol = &default_policy;
 
 	return pol;
 }
 
 bool vma_policy_mof(struct task_struct *task, struct vm_area_struct *vma)
 {
 	struct mempolicy *pol = get_task_policy(task);
+
 	if (vma) {
 		if (vma->vm_ops && vma->vm_ops->get_policy) {
 			bool ret = false;
@@ -1667,9 +1665,6 @@ bool vma_policy_mof(struct task_struct *task, struct vm_area_struct *vma)
 		}
 	}
 
-	if (!pol)
-		return default_policy.flags & MPOL_F_MOF;
-
 	return pol->flags & MPOL_F_MOF;
 }
@@ -2077,7 +2072,7 @@ struct page *alloc_pages_current(gfp_t gfp, unsigned order)
 	struct page *page;
 	unsigned int cpuset_mems_cookie;
 
-	if (!pol || in_interrupt() || (gfp & __GFP_THISNODE))
+	if (in_interrupt() || (gfp & __GFP_THISNODE))
 		pol = &default_policy;
 
 retry_cpuset: