mm: protect against concurrent vma expansion

expand_stack() runs with a shared mmap_sem lock.  Because of this, there
could be multiple concurrent stack expansions in the same mm, which may
cause problems in the vma gap update code.
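
As a simplified illustration of the hazard (made-up names, not kernel code):
vma_gap_update() maintains cached gap sizes that are shared between
neighbouring vmas through common rbtree ancestors, so two unsynchronized
expanders can interleave and leave the shared bookkeeping stale, much like
the lost update in the sketch below.

	/* Simplified model of the race; the names are hypothetical and the
	 * shared max_gap stands in for rb_subtree_gap on common ancestors. */
	#include <pthread.h>
	#include <stdio.h>

	struct node {
		unsigned long start, end;
		unsigned long gap;		/* gap to the previous node's end */
		struct node *prev, *next;
	};

	static struct node a = { .start = 0x10000, .end = 0x11000 };
	static struct node b = { .start = 0x40000, .end = 0x41000, .prev = &a };
	static struct node c = { .start = 0x90000, .end = 0x91000, .prev = &b };

	static unsigned long max_gap;	/* shared aggregate over all gaps */

	static void propagate(void)
	{
		unsigned long m = 0;
		struct node *n;

		for (n = &a; n; n = n->next)
			if (n->gap > m)
				m = n->gap;
		max_gap = m;		/* unsynchronized read-modify-write */
	}

	static void *expand(void *arg)
	{
		struct node *n = arg;

		n->end += 0x1000;			/* grow the "vma" upwards */
		n->next->gap = n->next->start - n->end;	/* like vma_gap_update() */
		propagate();				/* races with the other expander */
		return NULL;
	}

	int main(void)
	{
		pthread_t t1, t2;

		a.next = &b;
		b.next = &c;
		b.gap = b.start - a.end;
		c.gap = c.start - b.end;

		pthread_create(&t1, NULL, expand, &a);
		pthread_create(&t2, NULL, expand, &b);
		pthread_join(t1, NULL);
		pthread_join(t2, NULL);

		/* With unlucky timing, max_gap still reflects a pre-expansion gap. */
		printf("max gap seen: %#lx\n", max_gap);
		return 0;
	}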

I propose to solve this by taking the mm->page_table_lock around such vma
expansions, in order to avoid the concurrency issue.  We only have to
worry about concurrent expand_stack() calls here, since we hold a shared
mmap_sem lock and all vma modifications other than expand_stack() are done
under an exclusive mmap_sem lock.
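
Condensed from the diff below, the pattern applied in expand_upwards()
(expand_downwards() is symmetric) is simply to bracket the expansion and the
gap propagation with mm->page_table_lock:

	spin_lock(&vma->vm_mm->page_table_lock);
	anon_vma_interval_tree_pre_update_vma(vma);
	vma->vm_end = address;
	anon_vma_interval_tree_post_update_vma(vma);
	if (vma->vm_next)
		vma_gap_update(vma->vm_next);
	else
		vma->vm_mm->highest_vm_end = address;
	spin_unlock(&vma->vm_mm->page_table_lock);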

I previously tried to achieve the same effect by making sure all growable
vmas in a given mm would share the same anon_vma, which we already lock
here.  However this turned out to be difficult - all of the schemes I
tried for refcounting the growable anon_vmas and clearing them turned out ugly.
So, I'm now proposing only the minimal fix.

The overhead of taking the page table lock during stack expansion is
expected to be small: glibc doesn't use expandable stacks for the threads
it creates, so having multiple growable stacks is actually uncommon and we
don't expect the page table lock to get bounced between threads.

Signed-off-by: Michel Lespinasse <walken@google.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit 4128997b5f (parent c95d26c2ff)
Author: Michel Lespinasse, 2012-12-12 13:52:25 -08:00
Committed by: Linus Torvalds

mm/mmap.c

@@ -2069,6 +2069,18 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
 		if (vma->vm_pgoff + (size >> PAGE_SHIFT) >= vma->vm_pgoff) {
 			error = acct_stack_growth(vma, size, grow);
 			if (!error) {
+				/*
+				 * vma_gap_update() doesn't support concurrent
+				 * updates, but we only hold a shared mmap_sem
+				 * lock here, so we need to protect against
+				 * concurrent vma expansions.
+				 * vma_lock_anon_vma() doesn't help here, as
+				 * we don't guarantee that all growable vmas
+				 * in a mm share the same root anon vma.
+				 * So, we reuse mm->page_table_lock to guard
+				 * against concurrent vma expansions.
+				 */
+				spin_lock(&vma->vm_mm->page_table_lock);
 				anon_vma_interval_tree_pre_update_vma(vma);
 				vma->vm_end = address;
 				anon_vma_interval_tree_post_update_vma(vma);
@@ -2076,6 +2088,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
 					vma_gap_update(vma->vm_next);
 				else
 					vma->vm_mm->highest_vm_end = address;
+				spin_unlock(&vma->vm_mm->page_table_lock);
+
 				perf_event_mmap(vma);
 			}
 		}
@@ -2126,11 +2140,25 @@ int expand_downwards(struct vm_area_struct *vma,
 		if (grow <= vma->vm_pgoff) {
 			error = acct_stack_growth(vma, size, grow);
 			if (!error) {
+				/*
+				 * vma_gap_update() doesn't support concurrent
+				 * updates, but we only hold a shared mmap_sem
+				 * lock here, so we need to protect against
+				 * concurrent vma expansions.
+				 * vma_lock_anon_vma() doesn't help here, as
+				 * we don't guarantee that all growable vmas
+				 * in a mm share the same root anon vma.
+				 * So, we reuse mm->page_table_lock to guard
+				 * against concurrent vma expansions.
+				 */
+				spin_lock(&vma->vm_mm->page_table_lock);
 				anon_vma_interval_tree_pre_update_vma(vma);
 				vma->vm_start = address;
 				vma->vm_pgoff -= grow;
 				anon_vma_interval_tree_post_update_vma(vma);
 				vma_gap_update(vma);
+				spin_unlock(&vma->vm_mm->page_table_lock);
+
 				perf_event_mmap(vma);
 			}
 		}