mm: numa: Migrate on reference policy

This is the simplest possible policy that still does something of note.
When a pte_numa is faulted, the page is migrated immediately. Any replacement
policy must at least do better than this and in all likelihood this
policy regresses normal workloads.

Signed-off-by: Mel Gorman <mgorman@suse.de>
Acked-by: Rik van Riel <riel@redhat.com>
commit 5606e3877a (parent 03c5a6e163)
Author: Mel Gorman <mgorman@suse.de>
Date:   2012-11-02 18:19:13 +00:00

2 changed files with 37 additions and 2 deletions
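
In effect the policy is a single node comparison: a page referenced from a CPU on another node becomes a migration candidate, unconditionally. A minimal standalone sketch of that decision (hypothetical names, not part of the commit; -1 stands in for "leave the page where it is"):

#include <stdio.h>

/*
 * Sketch of the migrate-on-reference decision that mpol_misplaced()
 * gains below: the target is simply the node of the CPU taking the
 * NUMA hinting fault. Returns -1 when the page is already local.
 */
static int moron_target_nid(int page_nid, int faulting_nid)
{
        return (page_nid != faulting_nid) ? faulting_nid : -1;
}

int main(void)
{
        printf("%d\n", moron_target_nid(0, 1)); /* 1: migrate to node 1 */
        printf("%d\n", moron_target_nid(1, 1)); /* -1: already local */
        return 0;
}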

--- a/include/uapi/linux/mempolicy.h
+++ b/include/uapi/linux/mempolicy.h
@@ -67,6 +67,7 @@ enum mpol_rebind_step {
 #define MPOL_F_LOCAL     (1 << 1) /* preferred local allocation */
 #define MPOL_F_REBINDING (1 << 2) /* identify policies in rebinding */
 #define MPOL_F_MOF       (1 << 3) /* this policy wants migrate on fault */
+#define MPOL_F_MORON     (1 << 4) /* Migrate On pte_numa Reference On Node */
 
 #endif /* _UAPI_LINUX_MEMPOLICY_H */

--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -118,6 +118,26 @@ static struct mempolicy default_policy = {
         .flags = MPOL_F_LOCAL,
 };
 
+static struct mempolicy preferred_node_policy[MAX_NUMNODES];
+
+static struct mempolicy *get_task_policy(struct task_struct *p)
+{
+        struct mempolicy *pol = p->mempolicy;
+        int node;
+
+        if (!pol) {
+                node = numa_node_id();
+                if (node != -1)
+                        pol = &preferred_node_policy[node];
+
+                /* preferred_node_policy is not initialised early in boot */
+                if (!pol->mode)
+                        pol = NULL;
+        }
+
+        return pol;
+}
+
 static const struct mempolicy_operations {
         int (*create)(struct mempolicy *pol, const nodemask_t *nodes);
         /*
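
The fallback added here reads well in isolation: a task with no explicit mempolicy inherits a per-node default preferring the local node (the table is filled in by numa_policy_init() below). A runnable sketch with pared-down, hypothetical stand-ins for the kernel types, restructured slightly so it is self-contained:

#include <stdio.h>

#define MAX_NUMNODES   4
#define MPOL_PREFERRED 1        /* stand-in; 0 doubles as "uninitialised" */

/* Pared-down mempolicy: just enough state for the fallback logic. */
struct mempolicy { int mode; int preferred_node; };

static struct mempolicy preferred_node_policy[MAX_NUMNODES];

/*
 * Mirrors get_task_policy(): an explicit task policy wins; otherwise
 * fall back to the local node's default, or NULL while the table is
 * still uninitialised (as it is early in boot).
 */
static struct mempolicy *task_policy(struct mempolicy *task_pol, int node)
{
        struct mempolicy *pol = task_pol;

        if (!pol && node != -1) {
                pol = &preferred_node_policy[node];
                if (!pol->mode)
                        pol = NULL;
        }
        return pol;
}

int main(void)
{
        printf("early boot: %p\n", (void *)task_policy(NULL, 1));

        /* numa_policy_init()-style setup for node 1 */
        preferred_node_policy[1] = (struct mempolicy){
                .mode = MPOL_PREFERRED, .preferred_node = 1,
        };
        printf("after init: prefer node %d\n",
               task_policy(NULL, 1)->preferred_node);
        return 0;
}
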
@@ -1598,7 +1618,7 @@ asmlinkage long compat_sys_mbind(compat_ulong_t start, compat_ulong_t len,
 struct mempolicy *get_vma_policy(struct task_struct *task,
                 struct vm_area_struct *vma, unsigned long addr)
 {
-        struct mempolicy *pol = task->mempolicy;
+        struct mempolicy *pol = get_task_policy(task);
 
         if (vma) {
                 if (vma->vm_ops && vma->vm_ops->get_policy) {
@@ -2021,7 +2041,7 @@ retry_cpuset:
  */
 struct page *alloc_pages_current(gfp_t gfp, unsigned order)
 {
-        struct mempolicy *pol = current->mempolicy;
+        struct mempolicy *pol = get_task_policy(current);
         struct page *page;
         unsigned int cpuset_mems_cookie;
@@ -2295,6 +2315,11 @@ int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long
         default:
                 BUG();
         }
+
+        /* Migrate the page towards the node whose CPU is referencing it */
+        if (pol->flags & MPOL_F_MORON)
+                polnid = numa_node_id();
+
         if (curnid != polnid)
                 ret = polnid;
 out:
@@ -2483,6 +2508,15 @@ void __init numa_policy_init(void)
                                      sizeof(struct sp_node),
                                      0, SLAB_PANIC, NULL);
 
+        for_each_node(nid) {
+                preferred_node_policy[nid] = (struct mempolicy) {
+                        .refcnt = ATOMIC_INIT(1),
+                        .mode = MPOL_PREFERRED,
+                        .flags = MPOL_F_MOF | MPOL_F_MORON,
+                        .v = { .preferred_node = nid, },
+                };
+        }
+
         /*
          * Set interleaving policy for system init. Interleaving is only
          * enabled across suitably sized nodes (default is >= 16MB), or