mm/mmap: Fix error path in do_vmi_align_munmap()

commit 606c812eb1 upstream

The error unrolling left the VMAs detached in many cases, left the
locked_vm statistic altered, and skipped the unrolling entirely when
the vma tree write failed.

Fix the error path by re-attaching the detached VMAs and adding the
missing goto for the failed vma tree write, and fix the locked_vm
statistic by updating it only after the vma tree write succeeds.
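
To illustrate the accounting half of the fix, here is a minimal
userspace sketch of the pattern the patch adopts (all names
hypothetical, not kernel code): the locked-page count is accumulated
in a local variable during the gather phase and applied to the shared
statistic only after the final tree write succeeds, so a failed write
leaves the counter untouched.

	#include <stdio.h>
	#include <stdbool.h>

	static long locked_stat = 100;	/* stands in for mm->locked_vm */

	/* stands in for mas_store_prealloc(); 'fail' forces the error path */
	static bool tree_write(bool fail)
	{
		return !fail;
	}

	static int unmap_range(bool fail)
	{
		long locked_vm = 0;	/* local accumulator, as in the patch */

		locked_vm += 25;	/* gather phase: per-VMA page counts */
		locked_vm += 17;

		if (!tree_write(fail))
			return -1;	/* error: locked_stat never modified */

		locked_stat -= locked_vm;	/* success: apply the delta once */
		return 0;
	}

	int main(void)
	{
		unmap_range(true);
		printf("after failed write: %ld\n", locked_stat);	/* 100 */
		unmap_range(false);
		printf("after good write:   %ld\n", locked_stat);	/* 58 */
		return 0;
	}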

Fixes: 763ecb0350 ("mm: remove the vma linked list")
Reported-by: Vegard Nossum <vegard.nossum@oracle.com>
Signed-off-by: Liam R. Howlett <Liam.Howlett@oracle.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
[ dwmw2: Strictly, the original patch wasn't *re-attaching* the
         detached VMAs. They *were* still attached but just had
         the 'detached' flag set, which is an optimisation that
         doesn't exist in 6.3, so drop that. Also drop the call
         to vma_start_write() which came in with the per-VMA
         locking in 6.4. ]
[ dwmw2 (6.1): It's do_mas_align_munmap() here, with two call
         sites for the now-removed munmap_sidetree() function.
         Inline them both rather than trying to backport various
         dependencies with potentially subtle interactions. ]
Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

diff --git a/mm/mmap.c b/mm/mmap.c
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2311,19 +2311,6 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
 	return __split_vma(mm, vma, addr, new_below);
 }
 
-static inline int munmap_sidetree(struct vm_area_struct *vma,
-				   struct ma_state *mas_detach)
-{
-	mas_set_range(mas_detach, vma->vm_start, vma->vm_end - 1);
-	if (mas_store_gfp(mas_detach, vma, GFP_KERNEL))
-		return -ENOMEM;
-
-	if (vma->vm_flags & VM_LOCKED)
-		vma->vm_mm->locked_vm -= vma_pages(vma);
-
-	return 0;
-}
-
 /*
  * do_mas_align_munmap() - munmap the aligned region from @start to @end.
  * @mas: The maple_state, ideally set up to alter the correct tree location.
@@ -2345,6 +2332,7 @@ do_mas_align_munmap(struct ma_state *mas, struct vm_area_struct *vma,
 	struct maple_tree mt_detach;
 	int count = 0;
 	int error = -ENOMEM;
+	unsigned long locked_vm = 0;
 	MA_STATE(mas_detach, &mt_detach, 0, 0);
 	mt_init_flags(&mt_detach, mas->tree->ma_flags & MT_FLAGS_LOCK_MASK);
 	mt_set_external_lock(&mt_detach, &mm->mmap_lock);
@@ -2403,18 +2391,23 @@ do_mas_align_munmap(struct ma_state *mas, struct vm_area_struct *vma,
 
 			mas_set(mas, end);
 			split = mas_prev(mas, 0);
-			error = munmap_sidetree(split, &mas_detach);
+			mas_set_range(&mas_detach, split->vm_start, split->vm_end - 1);
+			error = mas_store_gfp(&mas_detach, split, GFP_KERNEL);
 			if (error)
-				goto munmap_sidetree_failed;
+				goto munmap_gather_failed;
+			if (next->vm_flags & VM_LOCKED)
+				locked_vm += vma_pages(split);
 
 			count++;
 			if (vma == next)
 				vma = split;
 			break;
 		}
-		error = munmap_sidetree(next, &mas_detach);
-		if (error)
-			goto munmap_sidetree_failed;
+		mas_set_range(&mas_detach, next->vm_start, next->vm_end - 1);
+		if (mas_store_gfp(&mas_detach, next, GFP_KERNEL))
+			goto munmap_gather_failed;
+		if (next->vm_flags & VM_LOCKED)
+			locked_vm += vma_pages(next);
 
 		count++;
 #ifdef CONFIG_DEBUG_VM_MAPLE_TREE
@@ -2464,6 +2457,8 @@ do_mas_align_munmap(struct ma_state *mas, struct vm_area_struct *vma,
 	}
 #endif
 	mas_store_prealloc(mas, NULL);
+
+	mm->locked_vm -= locked_vm;
 	mm->map_count -= count;
 	/*
 	 * Do not downgrade mmap_lock if we are next to VM_GROWSDOWN or
@@ -2490,7 +2485,7 @@ do_mas_align_munmap(struct ma_state *mas, struct vm_area_struct *vma,
 	return downgrade ? 1 : 0;
 
 userfaultfd_error:
-munmap_sidetree_failed:
+munmap_gather_failed:
 end_split_failed:
 	__mt_destroy(&mt_detach);
 start_split_failed:
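
For completeness, a compact userspace sketch of the goto ladder those
labels implement (illustrative names only, not the kernel code): each
failure label falls through into the teardown needed for the state set
up before it, which is why munmap_gather_failed sits above
__mt_destroy() while start_split_failed sits below it.

	#include <stdio.h>

	/* stands in for __mt_destroy(&mt_detach) */
	static void destroy_detach_tree(void)
	{
		puts("destroying detach tree");
	}

	static int do_unmap(int fail_at)
	{
		int error = -1;

		if (fail_at == 0)
			goto start_split_failed;	/* nothing gathered yet */
		if (fail_at == 1)
			goto munmap_gather_failed;	/* detach tree holds entries */
		return 0;

	munmap_gather_failed:
		destroy_detach_tree();			/* falls through to the return */
	start_split_failed:
		return error;
	}

	int main(void)
	{
		do_unmap(1);	/* prints the teardown step */
		do_unmap(0);	/* no teardown needed */
		return 0;
	}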