mirror of
https://mirrors.bfsu.edu.cn/git/linux.git
synced 2024-11-11 12:28:41 +08:00
mm, mempolicy: remove per-process flag
PF_MEMPOLICY is an unnecessary optimization for CONFIG_SLAB users. There's no significant performance degradation to checking current->mempolicy rather than current->flags & PF_MEMPOLICY in the allocation path, especially since this is considered unlikely(). Running TCP_RR with netperf-2.4.5 through localhost on 16 cpu machine with 64GB of memory and without a mempolicy: threads before after 16 1249409 1244487 32 1281786 1246783 48 1239175 1239138 64 1244642 1241841 80 1244346 1248918 96 1266436 1254316 112 1307398 1312135 128 1327607 1326502 Per-process flags are a scarce resource so we should free them up whenever possible and make them available. We'll be using it shortly for memcg oom reserves. Signed-off-by: David Rientjes <rientjes@google.com> Cc: Johannes Weiner <hannes@cmpxchg.org> Cc: Michal Hocko <mhocko@suse.cz> Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com> Cc: Christoph Lameter <cl@linux-foundation.org> Cc: Pekka Enberg <penberg@kernel.org> Cc: Tejun Heo <tj@kernel.org> Cc: Mel Gorman <mgorman@suse.de> Cc: Oleg Nesterov <oleg@redhat.com> Cc: Rik van Riel <riel@redhat.com> Cc: Jianguo Wu <wujianguo@huawei.com> Cc: Tim Hockin <thockin@google.com> Cc: Christoph Lameter <cl@linux.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
parent
2a389610a7
commit
f0432d1596
@ -143,7 +143,6 @@ extern void numa_policy_init(void);
|
||||
extern void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new,
|
||||
enum mpol_rebind_step step);
|
||||
extern void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new);
|
||||
extern void mpol_fix_fork_child_flag(struct task_struct *p);
|
||||
|
||||
extern struct zonelist *huge_zonelist(struct vm_area_struct *vma,
|
||||
unsigned long addr, gfp_t gfp_flags,
|
||||
|
@ -1851,7 +1851,6 @@ extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut,
|
||||
#define PF_SPREAD_SLAB 0x02000000 /* Spread some slab caches over cpuset */
|
||||
#define PF_NO_SETAFFINITY 0x04000000 /* Userland is not allowed to meddle with cpus_allowed */
|
||||
#define PF_MCE_EARLY 0x08000000 /* Early kill for mce process policy */
|
||||
#define PF_MEMPOLICY 0x10000000 /* Non-default NUMA mempolicy */
|
||||
#define PF_MUTEX_TESTER 0x20000000 /* Thread belongs to the rt mutex tester */
|
||||
#define PF_FREEZER_SKIP 0x40000000 /* Freezer should not count it as freezable */
|
||||
#define PF_SUSPEND_TASK 0x80000000 /* this thread called freeze_processes and should not be frozen */
|
||||
|
@ -1276,7 +1276,6 @@ static struct task_struct *copy_process(unsigned long clone_flags,
|
||||
p->mempolicy = NULL;
|
||||
goto bad_fork_cleanup_threadgroup_lock;
|
||||
}
|
||||
mpol_fix_fork_child_flag(p);
|
||||
#endif
|
||||
#ifdef CONFIG_CPUSETS
|
||||
p->cpuset_mem_spread_rotor = NUMA_NO_NODE;
|
||||
|
@ -795,36 +795,6 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
|
||||
return err;
|
||||
}
|
||||
|
||||
/*
|
||||
* Update task->flags PF_MEMPOLICY bit: set iff non-default
|
||||
* mempolicy. Allows more rapid checking of this (combined perhaps
|
||||
* with other PF_* flag bits) on memory allocation hot code paths.
|
||||
*
|
||||
* If called from outside this file, the task 'p' should -only- be
|
||||
* a newly forked child not yet visible on the task list, because
|
||||
* manipulating the task flags of a visible task is not safe.
|
||||
*
|
||||
* The above limitation is why this routine has the funny name
|
||||
* mpol_fix_fork_child_flag().
|
||||
*
|
||||
* It is also safe to call this with a task pointer of current,
|
||||
* which the static wrapper mpol_set_task_struct_flag() does,
|
||||
* for use within this file.
|
||||
*/
|
||||
|
||||
void mpol_fix_fork_child_flag(struct task_struct *p)
|
||||
{
|
||||
if (p->mempolicy)
|
||||
p->flags |= PF_MEMPOLICY;
|
||||
else
|
||||
p->flags &= ~PF_MEMPOLICY;
|
||||
}
|
||||
|
||||
static void mpol_set_task_struct_flag(void)
|
||||
{
|
||||
mpol_fix_fork_child_flag(current);
|
||||
}
|
||||
|
||||
/* Set the process memory policy */
|
||||
static long do_set_mempolicy(unsigned short mode, unsigned short flags,
|
||||
nodemask_t *nodes)
|
||||
@ -861,7 +831,6 @@ static long do_set_mempolicy(unsigned short mode, unsigned short flags,
|
||||
}
|
||||
old = current->mempolicy;
|
||||
current->mempolicy = new;
|
||||
mpol_set_task_struct_flag();
|
||||
if (new && new->mode == MPOL_INTERLEAVE &&
|
||||
nodes_weight(new->v.nodes))
|
||||
current->il_next = first_node(new->v.nodes);
|
||||
|
@ -3027,7 +3027,7 @@ out:
|
||||
|
||||
#ifdef CONFIG_NUMA
|
||||
/*
|
||||
* Try allocating on another node if PF_SPREAD_SLAB|PF_MEMPOLICY.
|
||||
 * Try allocating on another node if PF_SPREAD_SLAB or a mempolicy is set.
|
||||
*
|
||||
* If we are in_interrupt, then process context, including cpusets and
|
||||
* mempolicy, may not apply and should not be used for allocation policy.
|
||||
@ -3259,7 +3259,7 @@ __do_cache_alloc(struct kmem_cache *cache, gfp_t flags)
|
||||
{
|
||||
void *objp;
|
||||
|
||||
if (unlikely(current->flags & (PF_SPREAD_SLAB | PF_MEMPOLICY))) {
|
||||
if (current->mempolicy || unlikely(current->flags & PF_SPREAD_SLAB)) {
|
||||
objp = alternate_node_alloc(cache, flags);
|
||||
if (objp)
|
||||
goto out;
|
||||
|
Loading…
Reference in New Issue
Block a user