task_mmu: convert to vma iterator

Use the vma iterator so that the iterator can be invalidated or updated to
avoid each caller doing so.

Update the comments to reflect how the vma iterator works.  The vma iterator will
keep track of the last vm_end and start the search from vm_end + 1.

Link: https://lkml.kernel.org/r/20230120162650.984577-22-Liam.Howlett@oracle.com
Signed-off-by: Liam R. Howlett <Liam.Howlett@oracle.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
Liam R. Howlett 2023-01-20 11:26:22 -05:00 committed by Andrew Morton
parent f10c2abcda
commit 250cb40f0a

View File

@@ -890,7 +890,7 @@ static int show_smaps_rollup(struct seq_file *m, void *v)
struct vm_area_struct *vma;
unsigned long vma_start = 0, last_vma_end = 0;
int ret = 0;
MA_STATE(mas, &mm->mm_mt, 0, 0);
VMA_ITERATOR(vmi, mm, 0);
priv->task = get_proc_task(priv->inode);
if (!priv->task)
@@ -908,7 +908,7 @@ static int show_smaps_rollup(struct seq_file *m, void *v)
goto out_put_mm;
hold_task_mempolicy(priv);
vma = mas_find(&mas, ULONG_MAX);
vma = vma_next(&vmi);
if (unlikely(!vma))
goto empty_set;
@@ -923,7 +923,7 @@ static int show_smaps_rollup(struct seq_file *m, void *v)
* access it for write request.
*/
if (mmap_lock_is_contended(mm)) {
mas_pause(&mas);
vma_iter_invalidate(&vmi);
mmap_read_unlock(mm);
ret = mmap_read_lock_killable(mm);
if (ret) {
@@ -948,31 +948,31 @@ static int show_smaps_rollup(struct seq_file *m, void *v)
*
* 1) VMA2 is freed, but VMA3 exists:
*
* find_vma(mm, 16k - 1) will return VMA3.
* vma_next(vmi) will return VMA3.
* In this case, just continue from VMA3.
*
* 2) VMA2 still exists:
*
* find_vma(mm, 16k - 1) will return VMA2.
* Iterate the loop like the original one.
* vma_next(vmi) will return VMA3.
* In this case, just continue from VMA3.
*
* 3) No more VMAs can be found:
*
* find_vma(mm, 16k - 1) will return NULL.
* vma_next(vmi) will return NULL.
* No more things to do, just break.
*
* 4) (last_vma_end - 1) is the middle of a vma (VMA'):
*
* find_vma(mm, 16k - 1) will return VMA' whose range
* vma_next(vmi) will return VMA' whose range
* contains last_vma_end.
* Iterate VMA' from last_vma_end.
*/
vma = mas_find(&mas, ULONG_MAX);
vma = vma_next(&vmi);
/* Case 3 above */
if (!vma)
break;
/* Case 1 above */
/* Case 1 and 2 above */
if (vma->vm_start >= last_vma_end)
continue;
@@ -980,8 +980,7 @@ static int show_smaps_rollup(struct seq_file *m, void *v)
if (vma->vm_end > last_vma_end)
smap_gather_stats(vma, &mss, last_vma_end);
}
/* Case 2 above */
} while ((vma = mas_find(&mas, ULONG_MAX)) != NULL);
} for_each_vma(vmi, vma);
empty_set:
show_vma_header_prefix(m, vma_start, last_vma_end, 0, 0, 0, 0);
@@ -1277,7 +1276,7 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
return -ESRCH;
mm = get_task_mm(task);
if (mm) {
MA_STATE(mas, &mm->mm_mt, 0, 0);
VMA_ITERATOR(vmi, mm, 0);
struct mmu_notifier_range range;
struct clear_refs_private cp = {
.type = type,
@@ -1297,7 +1296,7 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
}
if (type == CLEAR_REFS_SOFT_DIRTY) {
mas_for_each(&mas, vma, ULONG_MAX) {
for_each_vma(vmi, vma) {
if (!(vma->vm_flags & VM_SOFTDIRTY))
continue;
vma->vm_flags &= ~VM_SOFTDIRTY;