Mirror of https://mirrors.bfsu.edu.cn/git/linux.git, synced 2024-12-28 05:24:47 +08:00
Commit b30fe6c7ce
The problem is that we check nr_ptes/nr_pmds in exit_mmap(), which happens *before* pgd_free(). If an arch allocates pte/pmd page tables in pgd_alloc() and frees them in pgd_free(), the counters are still offset at the time of the check.

We tried to work around this by offsetting the expected counter value according to FIRST_USER_ADDRESS for both nr_ptes and nr_pmds in exit_mmap(), but that doesn't work in some cases:

1. ARM with LPAE enabled also has a non-zero USER_PGTABLES_CEILING, but the upper addresses are occupied by huge pmd entries, so the trick of offsetting the expected counter value gets really ugly: we would have to apply it to nr_pmds, but not to nr_ptes.

2. Metag has a non-zero FIRST_USER_ADDRESS, but doesn't allocate pte/pmd page tables in pgd_alloc(); it just sets up a pgd entry which is allocated at boot and shared across all processes.

The proposal is to move the check to check_mm(), which happens *after* pgd_free(), and to do proper accounting during pgd_alloc() and pgd_free(), which brings the counters to zero if nothing leaked.

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Reported-by: Tyler Baker <tyler.baker@linaro.org>
Tested-by: Tyler Baker <tyler.baker@linaro.org>
Tested-by: Nishanth Menon <nm@ti.com>
Cc: Russell King <linux@arm.linux.org.uk>
Cc: James Hogan <james.hogan@imgtec.com>
Cc: Guan Xuetao <gxt@mprc.pku.edu.cn>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
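For context, a minimal sketch of the kind of post-pgd_free() sanity check the message describes moving into check_mm() in kernel/fork.c. This is an illustration, not the literal upstream implementation; the report wording is assumed, while mm->nr_ptes, mm_nr_pmds() and mm_dec_nr_pmds() are the same counters and helpers the file below manipulates.

/*
 * Illustrative sketch only: a counter check run after pgd_free(), so
 * any pte/pmd tables allocated in pgd_alloc() have already been freed
 * and un-accounted by the time it runs.
 */
static void check_mm(struct mm_struct *mm)
{
	/* Both counters should be exactly zero here; anything else
	 * indicates a leaked page table rather than an arch-specific
	 * offset that needs compensating. */
	if (atomic_long_read(&mm->nr_ptes))
		pr_alert("BUG: non-zero nr_ptes on freeing mm: %ld\n",
			 atomic_long_read(&mm->nr_ptes));
	if (mm_nr_pmds(mm))
		pr_alert("BUG: non-zero nr_pmds on freeing mm: %ld\n",
			 mm_nr_pmds(mm));
}

Because the check runs after pgd_free(), no FIRST_USER_ADDRESS-based fudging of the expected value is needed; the arch code below simply decrements the counters as it frees the tables it allocated in pgd_alloc().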
165 lines · 3.4 KiB · C
/*
 *  linux/arch/arm/mm/pgd.c
 *
 *  Copyright (C) 1998-2005 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/slab.h>

#include <asm/cp15.h>
#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/tlbflush.h>

#include "mm.h"

#ifdef CONFIG_ARM_LPAE
#define __pgd_alloc()	kmalloc(PTRS_PER_PGD * sizeof(pgd_t), GFP_KERNEL)
#define __pgd_free(pgd)	kfree(pgd)
#else
#define __pgd_alloc()	(pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_REPEAT, 2)
#define __pgd_free(pgd)	free_pages((unsigned long)pgd, 2)
#endif

/*
 * need to get a 16k page for level 1
 */
pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *new_pgd, *init_pgd;
	pud_t *new_pud, *init_pud;
	pmd_t *new_pmd, *init_pmd;
	pte_t *new_pte, *init_pte;

	new_pgd = __pgd_alloc();
	if (!new_pgd)
		goto no_pgd;

	memset(new_pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));

	/*
	 * Copy over the kernel and IO PGD entries
	 */
	init_pgd = pgd_offset_k(0);
	memcpy(new_pgd + USER_PTRS_PER_PGD, init_pgd + USER_PTRS_PER_PGD,
	       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));

	clean_dcache_area(new_pgd, PTRS_PER_PGD * sizeof(pgd_t));

#ifdef CONFIG_ARM_LPAE
	/*
	 * Allocate PMD table for modules and pkmap mappings.
	 */
	new_pud = pud_alloc(mm, new_pgd + pgd_index(MODULES_VADDR),
			    MODULES_VADDR);
	if (!new_pud)
		goto no_pud;

	new_pmd = pmd_alloc(mm, new_pud, 0);
	if (!new_pmd)
		goto no_pmd;
#endif

	if (!vectors_high()) {
		/*
		 * On ARM, first page must always be allocated since it
		 * contains the machine vectors. The vectors are always high
		 * with LPAE.
		 */
		new_pud = pud_alloc(mm, new_pgd, 0);
		if (!new_pud)
			goto no_pud;

		new_pmd = pmd_alloc(mm, new_pud, 0);
		if (!new_pmd)
			goto no_pmd;

		new_pte = pte_alloc_map(mm, NULL, new_pmd, 0);
		if (!new_pte)
			goto no_pte;

		init_pud = pud_offset(init_pgd, 0);
		init_pmd = pmd_offset(init_pud, 0);
		init_pte = pte_offset_map(init_pmd, 0);
		set_pte_ext(new_pte + 0, init_pte[0], 0);
		set_pte_ext(new_pte + 1, init_pte[1], 0);
		pte_unmap(init_pte);
		pte_unmap(new_pte);
	}

	return new_pgd;

no_pte:
	pmd_free(mm, new_pmd);
	mm_dec_nr_pmds(mm);
no_pmd:
	pud_free(mm, new_pud);
no_pud:
	__pgd_free(new_pgd);
no_pgd:
	return NULL;
}

void pgd_free(struct mm_struct *mm, pgd_t *pgd_base)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pgtable_t pte;

	if (!pgd_base)
		return;

	pgd = pgd_base + pgd_index(0);
	if (pgd_none_or_clear_bad(pgd))
		goto no_pgd;

	pud = pud_offset(pgd, 0);
	if (pud_none_or_clear_bad(pud))
		goto no_pud;

	pmd = pmd_offset(pud, 0);
	if (pmd_none_or_clear_bad(pmd))
		goto no_pmd;

	pte = pmd_pgtable(*pmd);
	pmd_clear(pmd);
	pte_free(mm, pte);
	atomic_long_dec(&mm->nr_ptes);
no_pmd:
	pud_clear(pud);
	pmd_free(mm, pmd);
	mm_dec_nr_pmds(mm);
no_pud:
	pgd_clear(pgd);
	pud_free(mm, pud);
no_pgd:
#ifdef CONFIG_ARM_LPAE
	/*
	 * Free modules/pkmap or identity pmd tables.
	 */
	for (pgd = pgd_base; pgd < pgd_base + PTRS_PER_PGD; pgd++) {
		if (pgd_none_or_clear_bad(pgd))
			continue;
		if (pgd_val(*pgd) & L_PGD_SWAPPER)
			continue;
		pud = pud_offset(pgd, 0);
		if (pud_none_or_clear_bad(pud))
			continue;
		pmd = pmd_offset(pud, 0);
		pud_clear(pud);
		pmd_free(mm, pmd);
		mm_dec_nr_pmds(mm);
		pgd_clear(pgd);
		pud_free(mm, pud);
	}
#endif
	__pgd_free(pgd_base);
}