pagewalk: don't look up vma if walk->hugetlb_entry is unused
Currently, walk_page_range() calls find_vma() on every page-table-walk
iteration, but that is completely unnecessary if walk->hugetlb_entry is
unused, and we cannot assume find_vma() is a lightweight operation.  So
this patch checks walk->hugetlb_entry and avoids the find_vma() call
where possible.

This patch also makes two cleanups: 1) remove the ugly
uninitialized_var() and 2) remove the #ifdef in the function body.

Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: Hiroyuki Kamezawa <kamezawa.hiroyuki@gmail.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Matt Mackall <mpm@selenic.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 4b6ddbf7ed
commit 6c6d528043
--- a/mm/pagewalk.c
+++ b/mm/pagewalk.c
@@ -126,7 +126,39 @@ static int walk_hugetlb_range(struct vm_area_struct *vma,
 
 	return 0;
 }
-#endif
+
+static struct vm_area_struct* hugetlb_vma(unsigned long addr, struct mm_walk *walk)
+{
+	struct vm_area_struct *vma;
+
+	/* We don't need vma lookup at all. */
+	if (!walk->hugetlb_entry)
+		return NULL;
+
+	VM_BUG_ON(!rwsem_is_locked(&walk->mm->mmap_sem));
+	vma = find_vma(walk->mm, addr);
+	if (vma && vma->vm_start <= addr && is_vm_hugetlb_page(vma))
+		return vma;
+
+	return NULL;
+}
+
+#else /* CONFIG_HUGETLB_PAGE */
+static struct vm_area_struct* hugetlb_vma(unsigned long addr, struct mm_walk *walk)
+{
+	return NULL;
+}
+
+static int walk_hugetlb_range(struct vm_area_struct *vma,
+			      unsigned long addr, unsigned long end,
+			      struct mm_walk *walk)
+{
+	return 0;
+}
+
+#endif /* CONFIG_HUGETLB_PAGE */
+
+
 
 /**
  * walk_page_range - walk a memory map's page tables with a callback
@@ -165,18 +197,17 @@ int walk_page_range(unsigned long addr, unsigned long end,
 
 	pgd = pgd_offset(walk->mm, addr);
 	do {
-		struct vm_area_struct *uninitialized_var(vma);
+		struct vm_area_struct *vma;
 
 		next = pgd_addr_end(addr, end);
 
-#ifdef CONFIG_HUGETLB_PAGE
 		/*
 		 * handle hugetlb vma individually because pagetable walk for
 		 * the hugetlb page is dependent on the architecture and
 		 * we can't handled it in the same manner as non-huge pages.
 		 */
-		vma = find_vma(walk->mm, addr);
-		if (vma && vma->vm_start <= addr && is_vm_hugetlb_page(vma)) {
+		vma = hugetlb_vma(addr, walk);
+		if (vma) {
 			if (vma->vm_end < next)
 				next = vma->vm_end;
 			/*
@@ -189,7 +220,7 @@ int walk_page_range(unsigned long addr, unsigned long end,
 			pgd = pgd_offset(walk->mm, next);
 			continue;
 		}
-#endif
+
 		if (pgd_none_or_clear_bad(pgd)) {
 			if (walk->pte_hole)
 				err = walk->pte_hole(addr, next, walk);
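From a caller's point of view, the win is that any walker which leaves
walk->hugetlb_entry NULL no longer pays for a find_vma() on every pgd
iteration.  Below is a minimal, hypothetical sketch of such a caller — it is
not part of this commit; the names note_pte() and count_mapped() are invented,
and it assumes the mm_walk API of this kernel generation (.pte_entry/.mm/
.private fields, walk_page_range(addr, end, walk), and mmap_sem held for
reading):

/* Hypothetical example, not from this patch: a walker with no
 * .hugetlb_entry.  After this change, hugetlb_vma() returns NULL
 * immediately, so find_vma() is never called during the walk.
 */
static int note_pte(pte_t *pte, unsigned long addr, unsigned long next,
		    struct mm_walk *walk)
{
	if (pte_present(*pte))			/* count mapped pages */
		(*(unsigned long *)walk->private)++;
	return 0;
}

static unsigned long count_mapped(struct mm_struct *mm)
{
	unsigned long nr = 0;
	struct mm_walk walk = {
		.pte_entry	= note_pte,
		.mm		= mm,
		.private	= &nr,
		/* .hugetlb_entry left NULL: the new fast path applies */
	};

	down_read(&mm->mmap_sem);	/* callers must hold mmap_sem */
	walk_page_range(0, TASK_SIZE, &walk);
	up_read(&mm->mmap_sem);
	return nr;
}

Before this patch, the loop in walk_page_range() would have called find_vma()
once per pgd_addr_end() step for a walker like this, even though the result
was only needed for hugetlb handling; now the NULL check in hugetlb_vma()
short-circuits before the lookup.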