mirror of
https://mirrors.bfsu.edu.cn/git/linux.git
synced 2024-12-02 00:24:12 +08:00
50f11a8a46
The PTE allocations in arm64 are identical to the generic ones modulo the GFP flags. Using the generic pte_alloc_one() functions ensures that the user page tables are allocated with __GFP_ACCOUNT set. The arm64 definition of PGALLOC_GFP is removed and replaced with GFP_PGTABLE_USER for p[gum]d_alloc_one() for the user page tables and GFP_PGTABLE_KERNEL for the kernel page tables. The KVM memory cache is now using GFP_PGTABLE_USER. The mappings created with create_pgd_mapping() are now using GFP_PGTABLE_KERNEL. The conversion to the generic version of pte_free_kernel() removes the NULL check for pte. The pte_free() version on arm64 is identical to the generic one and can be simply dropped. [cai@lca.pw: fix a bogus GFP flag in pgd_alloc()] Link: https://lore.kernel.org/r/1559656836-24940-1-git-send-email-cai@lca.pw/ [and fix it more] Link: https://lore.kernel.org/linux-mm/20190617151252.GF16810@rapoport-lnx/ Link: http://lkml.kernel.org/r/1557296232-15361-5-git-send-email-rppt@linux.ibm.com Signed-off-by: Mike Rapoport <rppt@linux.ibm.com> Cc: Albert Ou <aou@eecs.berkeley.edu> Cc: Anshuman Khandual <anshuman.khandual@arm.com> Cc: Anton Ivanov <anton.ivanov@cambridgegreys.com> Cc: Arnd Bergmann <arnd@arndb.de> Cc: Catalin Marinas <catalin.marinas@arm.com> Cc: Geert Uytterhoeven <geert@linux-m68k.org> Cc: Greentime Hu <green.hu@gmail.com> Cc: Guan Xuetao <gxt@pku.edu.cn> Cc: Guo Ren <guoren@kernel.org> Cc: Guo Ren <ren_guo@c-sky.com> Cc: Helge Deller <deller@gmx.de> Cc: Ley Foon Tan <lftan@altera.com> Cc: Matthew Wilcox <willy@infradead.org> Cc: Matt Turner <mattst88@gmail.com> Cc: Michael Ellerman <mpe@ellerman.id.au> Cc: Michal Hocko <mhocko@suse.com> Cc: Palmer Dabbelt <palmer@sifive.com> Cc: Paul Burton <paul.burton@mips.com> Cc: Ralf Baechle <ralf@linux-mips.org> Cc: Richard Kuo <rkuo@codeaurora.org> Cc: Richard Weinberger <richard@nod.at> Cc: Russell King <linux@armlinux.org.uk> Cc: Sam Creasey <sammy@sammy.net> Cc: Vincent Chen <deanbo422@gmail.com> 
Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
124 lines
2.9 KiB
C
124 lines
2.9 KiB
C
/* SPDX-License-Identifier: GPL-2.0-only */
|
|
/*
|
|
* Based on arch/arm/include/asm/pgalloc.h
|
|
*
|
|
* Copyright (C) 2000-2001 Russell King
|
|
* Copyright (C) 2012 ARM Ltd.
|
|
*/
|
|
#ifndef __ASM_PGALLOC_H
|
|
#define __ASM_PGALLOC_H
|
|
|
|
#include <asm/pgtable-hwdef.h>
|
|
#include <asm/processor.h>
|
|
#include <asm/cacheflush.h>
|
|
#include <asm/tlbflush.h>
|
|
|
|
#include <asm-generic/pgalloc.h> /* for pte_{alloc,free}_one */
|
|
|
|
/* No page-table caches to trim on arm64, so this is a no-op. */
#define check_pgt_cache() do { } while (0)

/* Size in bytes of a top-level page table (one full set of pgd entries). */
#define PGD_SIZE (PTRS_PER_PGD * sizeof(pgd_t))
|
|
|
|
#if CONFIG_PGTABLE_LEVELS > 2
|
|
|
|
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
|
|
{
|
|
gfp_t gfp = GFP_PGTABLE_USER;
|
|
struct page *page;
|
|
|
|
if (mm == &init_mm)
|
|
gfp = GFP_PGTABLE_KERNEL;
|
|
|
|
page = alloc_page(gfp);
|
|
if (!page)
|
|
return NULL;
|
|
if (!pgtable_pmd_page_ctor(page)) {
|
|
__free_page(page);
|
|
return NULL;
|
|
}
|
|
return page_address(page);
|
|
}
|
|
|
|
/*
 * Free a PMD table previously allocated with pmd_alloc_one(), tearing
 * down its per-page state first.
 */
static inline void pmd_free(struct mm_struct *mm, pmd_t *pmdp)
{
	struct page *page = virt_to_page(pmdp);

	/* A PMD table is always a full, naturally aligned page. */
	BUG_ON((unsigned long)pmdp & (PAGE_SIZE-1));

	pgtable_pmd_page_dtor(page);
	free_page((unsigned long)pmdp);
}
|
|
|
|
/*
 * Write a PUD entry pointing at the PMD table at physical address
 * @pmdp, with descriptor bits @prot.
 */
static inline void __pud_populate(pud_t *pudp, phys_addr_t pmdp, pudval_t prot)
{
	pudval_t pudval = __phys_to_pud_val(pmdp) | prot;

	set_pud(pudp, __pud(pudval));
}
|
|
|
|
/*
 * Point the PUD entry at the given PMD table, installing it as a
 * table descriptor (PMD_TYPE_TABLE).
 */
static inline void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmdp)
{
	__pud_populate(pudp, __pa(pmdp), PMD_TYPE_TABLE);
}
|
|
#else
|
|
/*
 * With 2-level page tables there is no PMD level, so this must never
 * be reachable; any caller is a build-time error.
 */
static inline void __pud_populate(pud_t *pudp, phys_addr_t pmdp, pudval_t prot)
{
	BUILD_BUG();
}
|
|
#endif /* CONFIG_PGTABLE_LEVELS > 2 */
|
|
|
|
#if CONFIG_PGTABLE_LEVELS > 3
|
|
|
|
static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
|
|
{
|
|
return (pud_t *)__get_free_page(GFP_PGTABLE_USER);
|
|
}
|
|
|
|
/*
 * Release a PUD table previously allocated with pud_alloc_one().
 */
static inline void pud_free(struct mm_struct *mm, pud_t *pudp)
{
	unsigned long addr = (unsigned long)pudp;

	/* A PUD table is always a full, naturally aligned page. */
	BUG_ON(addr & (PAGE_SIZE-1));

	free_page(addr);
}
|
|
|
|
/*
 * Write a PGD entry pointing at the PUD table at physical address
 * @pudp, with descriptor bits @prot.
 */
static inline void __pgd_populate(pgd_t *pgdp, phys_addr_t pudp, pgdval_t prot)
{
	pgdval_t pgdval = __phys_to_pgd_val(pudp) | prot;

	set_pgd(pgdp, __pgd(pgdval));
}
|
|
|
|
/*
 * Point the PGD entry at the given PUD table, installing it as a
 * table descriptor (PUD_TYPE_TABLE).
 */
static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgdp, pud_t *pudp)
{
	__pgd_populate(pgdp, __pa(pudp), PUD_TYPE_TABLE);
}
|
|
#else
|
|
/*
 * With fewer than 4 page table levels there is no separate PUD level,
 * so this must never be reachable; any caller is a build-time error.
 */
static inline void __pgd_populate(pgd_t *pgdp, phys_addr_t pudp, pgdval_t prot)
{
	BUILD_BUG();
}
|
|
#endif /* CONFIG_PGTABLE_LEVELS > 3 */
|
|
|
|
/* Top-level page table alloc/free; implemented out of line in mm/pgd.c. */
extern pgd_t *pgd_alloc(struct mm_struct *mm);
extern void pgd_free(struct mm_struct *mm, pgd_t *pgdp);
|
|
|
|
static inline void __pmd_populate(pmd_t *pmdp, phys_addr_t ptep,
|
|
pmdval_t prot)
|
|
{
|
|
set_pmd(pmdp, __pmd(__phys_to_pmd_val(ptep) | prot));
|
|
}
|
|
|
|
/*
 * Populate the pmdp entry with a pointer to the pte. This pmd is part
 * of the mm address space.
 */
static inline void
pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep)
{
	/*
	 * The pmd must be loaded with the physical address of the PTE table
	 * (kernel PTE tables are addressed by virtual pointer, hence __pa()).
	 */
	__pmd_populate(pmdp, __pa(ptep), PMD_TYPE_TABLE);
}
|
|
|
|
/*
 * Populate the pmdp entry with the pte page. Here the table is handed
 * over as a pgtable_t (a struct page), hence page_to_phys() rather
 * than __pa().
 */
static inline void
pmd_populate(struct mm_struct *mm, pmd_t *pmdp, pgtable_t ptep)
{
	__pmd_populate(pmdp, page_to_phys(ptep), PMD_TYPE_TABLE);
}
/* Recover the pte page (struct page) backing a populated pmd entry. */
#define pmd_pgtable(pmd) pmd_page(pmd)
|
|
|
|
#endif
|