Mirror of https://github.com/edk2-porting/linux-next.git, synced 2024-12-17 09:43:59 +08:00, at commit bf181b9f9d.
When a large VMA (anon or private file mapping) is first touched, which will populate its anon_vma field, and then split into many regions through the use of mprotect(), the original anon_vma ends up linking all of the vmas on a linked list. This can cause rmap to become inefficient, as we have to walk potentially thousands of irrelevent vmas before finding the one a given anon page might fall into. By replacing the same_anon_vma linked list with an interval tree (where each avc's interval is determined by its vma's start and last pgoffs), we can make rmap efficient for this use case again. While the change is large, all of its pieces are fairly simple. Most places that were walking the same_anon_vma list were looking for a known pgoff, so they can just use the anon_vma_interval_tree_foreach() interval tree iterator instead. The exception here is ksm, where the page's index is not known. It would probably be possible to rework ksm so that the index would be known, but for now I have decided to keep things simple and just walk the entirety of the interval tree there. When updating vma's that already have an anon_vma assigned, we must take care to re-index the corresponding avc's on their interval tree. This is done through the use of anon_vma_interval_tree_pre_update_vma() and anon_vma_interval_tree_post_update_vma(), which remove the avc's from their interval tree before the update and re-insert them after the update. The anon_vma stays locked during the update, so there is no chance that rmap would miss the vmas that are being updated. Signed-off-by: Michel Lespinasse <walken@google.com> Cc: Andrea Arcangeli <aarcange@redhat.com> Cc: Rik van Riel <riel@redhat.com> Cc: Peter Zijlstra <a.p.zijlstra@chello.nl> Cc: Daniel Santos <daniel.santos@pobox.com> Cc: Hugh Dickins <hughd@google.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
74 lines · 2.2 KiB · C
/*
 * mm/interval_tree.c - interval tree for mapping->i_mmap
 *
 * Copyright (C) 2012, Michel Lespinasse <walken@google.com>
 *
 * This file is released under the GPL v2.
 */
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/rmap.h>
#include <linux/interval_tree_generic.h>
/*
 * First page offset covered by the vma; used as the low endpoint of the
 * vma's interval in the i_mmap interval tree.
 */
static inline unsigned long vma_start_pgoff(struct vm_area_struct *v)
{
	return v->vm_pgoff;
}
static inline unsigned long vma_last_pgoff(struct vm_area_struct *v)
|
|
{
|
|
return v->vm_pgoff + ((v->vm_end - v->vm_start) >> PAGE_SHIFT) - 1;
|
|
}
|
|
|
|
/*
 * Instantiate the vma_interval_tree_{insert,remove,iter_first,iter_next}()
 * family, keyed on [vma_start_pgoff, vma_last_pgoff], embedded at
 * vma->shared.linear.rb with the augmented subtree maximum cached in
 * shared.linear.rb_subtree_last.  (The empty macro argument means the
 * generated functions are not declared static.)
 */
INTERVAL_TREE_DEFINE(struct vm_area_struct, shared.linear.rb,
		     unsigned long, shared.linear.rb_subtree_last,
		     vma_start_pgoff, vma_last_pgoff,, vma_interval_tree)
/*
 * Insert node immediately after prev in the interval tree.
 *
 * This is a fast-path variant of vma_interval_tree_insert() for callers
 * (e.g. vma copy on fork) that already know the rbtree position: node and
 * prev must share the same start pgoff (enforced by the VM_BUG_ON below),
 * so node can be linked in as prev's in-order successor without a full
 * keyed descent from the root.
 *
 * Because we bypass the generated insert function, we must maintain the
 * rb_subtree_last augmentation by hand on every node we walk past, then
 * let rb_insert_augmented() rebalance.
 */
void vma_interval_tree_insert_after(struct vm_area_struct *node,
				    struct vm_area_struct *prev,
				    struct rb_root *root)
{
	struct rb_node **link;
	struct vm_area_struct *parent;
	unsigned long last = vma_last_pgoff(node);

	VM_BUG_ON(vma_start_pgoff(node) != vma_start_pgoff(prev));

	if (!prev->shared.linear.rb.rb_right) {
		/* prev has no right child: node becomes that right child. */
		parent = prev;
		link = &prev->shared.linear.rb.rb_right;
	} else {
		/*
		 * Otherwise the successor slot is the leftmost position in
		 * prev's right subtree.  Push the new interval's endpoint
		 * into rb_subtree_last of every ancestor on the way down.
		 */
		parent = rb_entry(prev->shared.linear.rb.rb_right,
				  struct vm_area_struct, shared.linear.rb);
		if (parent->shared.linear.rb_subtree_last < last)
			parent->shared.linear.rb_subtree_last = last;
		while (parent->shared.linear.rb.rb_left) {
			parent = rb_entry(parent->shared.linear.rb.rb_left,
					  struct vm_area_struct, shared.linear.rb);
			if (parent->shared.linear.rb_subtree_last < last)
				parent->shared.linear.rb_subtree_last = last;
		}
		link = &parent->shared.linear.rb.rb_left;
	}

	/* A freshly linked leaf's subtree max is its own last pgoff. */
	node->shared.linear.rb_subtree_last = last;
	rb_link_node(&node->shared.linear.rb, &parent->shared.linear.rb, link);
	rb_insert_augmented(&node->shared.linear.rb, root,
			    &vma_interval_tree_augment);
}
static inline unsigned long avc_start_pgoff(struct anon_vma_chain *avc)
|
|
{
|
|
return vma_start_pgoff(avc->vma);
|
|
}
|
|
|
|
static inline unsigned long avc_last_pgoff(struct anon_vma_chain *avc)
|
|
{
|
|
return vma_last_pgoff(avc->vma);
|
|
}
|
|
|
|
/*
 * Instantiate the anon_vma_interval_tree_*() functions, keyed on
 * [avc_start_pgoff, avc_last_pgoff], embedded at avc->rb with the
 * augmented subtree maximum cached in avc->rb_subtree_last.
 */
INTERVAL_TREE_DEFINE(struct anon_vma_chain, rb, unsigned long, rb_subtree_last,
		     avc_start_pgoff, avc_last_pgoff,, anon_vma_interval_tree)