
Merge branch 'for-next/mm' into for-next/core

* for-next/mm:
  Documentation: vmcoreinfo: Fix htmldocs warning
  arm64/mm: Drop use_1G_block()
  arm64: avoid flushing icache multiple times on contiguous HugeTLB
  arm64: crash_core: Export MODULES, VMALLOC, and VMEMMAP ranges
  arm64/hugetlb: Define __hugetlb_valid_size()
  arm64/mm: avoid fixmap race condition when create pud mapping
  arm64/mm: Consolidate TCR_EL1 fields
Will Deacon 2022-03-14 19:01:18 +00:00
commit 20fd2ed10f
8 changed files with 57 additions and 42 deletions

Documentation/admin-guide/kdump/vmcoreinfo.rst

@@ -494,6 +494,14 @@ architecture which is used to lookup the page-tables for the Virtual
 addresses in the higher VA range (refer to ARMv8 ARM document for
 more details).
+MODULES_VADDR|MODULES_END|VMALLOC_START|VMALLOC_END|VMEMMAP_START|VMEMMAP_END
+-----------------------------------------------------------------------------
+
+Used to get the correct ranges:
+
+  MODULES_VADDR ~ MODULES_END-1 : Kernel module space.
+  VMALLOC_START ~ VMALLOC_END-1 : vmalloc() / ioremap() space.
+  VMEMMAP_START ~ VMEMMAP_END-1 : vmemmap region, used for struct page array.
 
 arm
 ===

arch/arm64/include/asm/pgtable-hwdef.h

@@ -273,6 +273,8 @@
 #define TCR_NFD1		(UL(1) << 54)
 #define TCR_E0PD0		(UL(1) << 55)
 #define TCR_E0PD1		(UL(1) << 56)
+#define TCR_TCMA0		(UL(1) << 57)
+#define TCR_TCMA1		(UL(1) << 58)
 
 /*
  * TTBR.

arch/arm64/include/asm/sysreg.h

@@ -1101,10 +1101,6 @@
 #define CPACR_EL1_ZEN_EL0EN	(BIT(17)) /* enable EL0 access, if EL1EN set */
 #define CPACR_EL1_ZEN		(CPACR_EL1_ZEN_EL1EN | CPACR_EL1_ZEN_EL0EN)
 
-/* TCR EL1 Bit Definitions */
-#define SYS_TCR_EL1_TCMA1	(BIT(58))
-#define SYS_TCR_EL1_TCMA0	(BIT(57))
-
 /* GCR_EL1 Definitions */
 #define SYS_GCR_EL1_RRND	(BIT(16))
 #define SYS_GCR_EL1_EXCL_MASK	0xffffUL

arch/arm64/kernel/crash_core.c

@@ -20,6 +20,12 @@ void arch_crash_save_vmcoreinfo(void)
 {
 	VMCOREINFO_NUMBER(VA_BITS);
 	/* Please note VMCOREINFO_NUMBER() uses "%d", not "%x" */
+	vmcoreinfo_append_str("NUMBER(MODULES_VADDR)=0x%lx\n", MODULES_VADDR);
+	vmcoreinfo_append_str("NUMBER(MODULES_END)=0x%lx\n", MODULES_END);
+	vmcoreinfo_append_str("NUMBER(VMALLOC_START)=0x%lx\n", VMALLOC_START);
+	vmcoreinfo_append_str("NUMBER(VMALLOC_END)=0x%lx\n", VMALLOC_END);
+	vmcoreinfo_append_str("NUMBER(VMEMMAP_START)=0x%lx\n", VMEMMAP_START);
+	vmcoreinfo_append_str("NUMBER(VMEMMAP_END)=0x%lx\n", VMEMMAP_END);
 	vmcoreinfo_append_str("NUMBER(kimage_voffset)=0x%llx\n",
 			      kimage_voffset);
 	vmcoreinfo_append_str("NUMBER(PHYS_OFFSET)=0x%llx\n",

arch/arm64/mm/flush.c

@@ -52,6 +52,13 @@ void __sync_icache_dcache(pte_t pte)
 {
 	struct page *page = pte_page(pte);
 
+	/*
+	 * HugeTLB pages are always fully mapped, so only setting head page's
+	 * PG_dcache_clean flag is enough.
+	 */
+	if (PageHuge(page))
+		page = compound_head(page);
+
 	if (!test_bit(PG_dcache_clean, &page->flags)) {
 		sync_icache_aliases((unsigned long)page_address(page),
 				    (unsigned long)page_address(page) +
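The effect of this hunk is easiest to see in a toy model: once the lookup resolves to the head page, the PG_dcache_clean test fires once for the whole HugeTLB page rather than once per constituent PTE. Everything below (toy_page, toy_compound_head(), toy_sync_icache_dcache()) is an illustrative stand-in, not the kernel's types.

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for struct page: head pages point at themselves. */
struct toy_page {
	struct toy_page *head;
	bool dcache_clean;		/* stand-in for PG_dcache_clean */
};

static struct toy_page *toy_compound_head(struct toy_page *p)
{
	return p->head;
}

/* Returns true when a flush would actually be performed. */
static bool toy_sync_icache_dcache(struct toy_page *p)
{
	p = toy_compound_head(p);	/* the change above: test the head */
	if (!p->dcache_clean) {
		p->dcache_clean = true;	/* "flush" once, then mark clean */
		return true;
	}
	return false;
}

int main(void)
{
	struct toy_page head = { .head = &head, .dcache_clean = false };
	struct toy_page tail = { .head = &head, .dcache_clean = false };

	bool first  = toy_sync_icache_dcache(&tail);	/* flushes once */
	bool second = toy_sync_icache_dcache(&head);	/* already clean */

	printf("first=%d second=%d\n", first, second);	/* first=1 second=0 */
	return 0;
}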

arch/arm64/mm/hugetlbpage.c

@@ -56,24 +56,33 @@ void __init arm64_hugetlb_cma_reserve(void)
 }
 #endif /* CONFIG_CMA */
 
+static bool __hugetlb_valid_size(unsigned long size)
+{
+	switch (size) {
+#ifndef __PAGETABLE_PMD_FOLDED
+	case PUD_SIZE:
+		return pud_sect_supported();
+#endif
+	case CONT_PMD_SIZE:
+	case PMD_SIZE:
+	case CONT_PTE_SIZE:
+		return true;
+	}
+
+	return false;
+}
+
 #ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
 bool arch_hugetlb_migration_supported(struct hstate *h)
 {
 	size_t pagesize = huge_page_size(h);
 
-	switch (pagesize) {
-#ifndef __PAGETABLE_PMD_FOLDED
-	case PUD_SIZE:
-		return pud_sect_supported();
-#endif
-	case PMD_SIZE:
-	case CONT_PMD_SIZE:
-	case CONT_PTE_SIZE:
-		return true;
-	}
-	pr_warn("%s: unrecognized huge page size 0x%lx\n",
-		__func__, pagesize);
-	return false;
+	if (!__hugetlb_valid_size(pagesize)) {
+		pr_warn("%s: unrecognized huge page size 0x%lx\n",
+			__func__, pagesize);
+		return false;
+	}
+
+	return true;
 }
 #endif
@@ -506,16 +515,5 @@ arch_initcall(hugetlbpage_init);
 
 bool __init arch_hugetlb_valid_size(unsigned long size)
 {
-	switch (size) {
-#ifndef __PAGETABLE_PMD_FOLDED
-	case PUD_SIZE:
-		return pud_sect_supported();
-#endif
-	case CONT_PMD_SIZE:
-	case PMD_SIZE:
-	case CONT_PTE_SIZE:
-		return true;
-	}
-
-	return false;
+	return __hugetlb_valid_size(size);
 }
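For reference, a sketch of the sizes __hugetlb_valid_size() ends up accepting on a 4K-granule kernel. The entry counts (512 entries per table level, 16 contiguous PTEs or PMDs per contiguous mapping) are the 4K-granule values and are assumed here purely for illustration.

#include <stdio.h>

int main(void)
{
	unsigned long page_size     = 4096;		/* 4K granule */
	unsigned long cont_pte_size = page_size * 16;	/* CONT_PTE_SIZE */
	unsigned long pmd_size      = page_size * 512;	/* PMD_SIZE */
	unsigned long cont_pmd_size = pmd_size * 16;	/* CONT_PMD_SIZE */
	unsigned long pud_size      = pmd_size * 512;	/* PUD_SIZE, only if
							 * pud_sect_supported() */

	printf("valid hugepage sizes: %luK %luM %luM %luG\n",
	       cont_pte_size >> 10, pmd_size >> 20,
	       cont_pmd_size >> 20, pud_size >> 30);	/* 64K 2M 32M 1G */
	return 0;
}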

arch/arm64/mm/mmu.c

@@ -63,6 +63,7 @@ static pmd_t bm_pmd[PTRS_PER_PMD] __page_aligned_bss __maybe_unused;
 static pud_t bm_pud[PTRS_PER_PUD] __page_aligned_bss __maybe_unused;
 
 static DEFINE_SPINLOCK(swapper_pgdir_lock);
+static DEFINE_MUTEX(fixmap_lock);
 
 void set_swapper_pgd(pgd_t *pgdp, pgd_t pgd)
 {
@@ -294,18 +295,6 @@ static void alloc_init_cont_pmd(pud_t *pudp, unsigned long addr,
 	} while (addr = next, addr != end);
 }
 
-static inline bool use_1G_block(unsigned long addr, unsigned long next,
-				unsigned long phys)
-{
-	if (PAGE_SHIFT != 12)
-		return false;
-
-	if (((addr | next | phys) & ~PUD_MASK) != 0)
-		return false;
-
-	return true;
-}
-
 static void alloc_init_pud(pgd_t *pgdp, unsigned long addr, unsigned long end,
 			   phys_addr_t phys, pgprot_t prot,
 			   phys_addr_t (*pgtable_alloc)(int),
@@ -329,6 +318,12 @@ static void alloc_init_pud(pgd_t *pgdp, unsigned long addr, unsigned long end,
 	}
 	BUG_ON(p4d_bad(p4d));
 
+	/*
+	 * No need for locking during early boot. And it doesn't work as
+	 * expected with KASLR enabled.
+	 */
+	if (system_state != SYSTEM_BOOTING)
+		mutex_lock(&fixmap_lock);
 	pudp = pud_set_fixmap_offset(p4dp, addr);
 	do {
 		pud_t old_pud = READ_ONCE(*pudp);
@@ -338,7 +333,8 @@ static void alloc_init_pud(pgd_t *pgdp, unsigned long addr, unsigned long end,
 		/*
 		 * For 4K granule only, attempt to put down a 1GB block
 		 */
-		if (use_1G_block(addr, next, phys) &&
+		if (pud_sect_supported() &&
+		   ((addr | next | phys) & ~PUD_MASK) == 0 &&
 		    (flags & NO_BLOCK_MAPPINGS) == 0) {
 			pud_set_huge(pudp, phys, prot);
@@ -359,6 +355,8 @@ static void alloc_init_pud(pgd_t *pgdp, unsigned long addr, unsigned long end,
 	} while (pudp++, addr = next, addr != end);
 
 	pud_clear_fixmap();
+	if (system_state != SYSTEM_BOOTING)
+		mutex_unlock(&fixmap_lock);
 }
 
 static void __create_pgd_mapping(pgd_t *pgdir, phys_addr_t phys,
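The use_1G_block() helper is gone, but its alignment test survives inline in alloc_init_pud(); the old PAGE_SHIFT != 12 check is subsumed by pud_sect_supported(). A standalone sketch of the alignment condition, assuming the 4K-granule PUD_SIZE of 1GB; can_use_1G_block() is a name invented here for illustration.

#include <stdio.h>

#define PUD_SIZE	(1UL << 30)		/* 1GB on a 4K granule */
#define PUD_MASK	(~(PUD_SIZE - 1))

/* A 1GB block mapping needs virtual start, virtual end and physical
 * address all aligned to PUD_SIZE. */
static int can_use_1G_block(unsigned long addr, unsigned long next,
			    unsigned long phys)
{
	return ((addr | next | phys) & ~PUD_MASK) == 0;
}

int main(void)
{
	/* all three values are 1GB-aligned: prints 1 */
	printf("%d\n", can_use_1G_block(0x40000000, 0x80000000, 0xC0000000));
	/* phys is offset by 2MB: prints 0 */
	printf("%d\n", can_use_1G_block(0x40000000, 0x80000000, 0xC0200000));
	return 0;
}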
static void __create_pgd_mapping(pgd_t *pgdir, phys_addr_t phys,

arch/arm64/mm/proc.S

@@ -46,7 +46,7 @@
 #endif
 
 #ifdef CONFIG_KASAN_HW_TAGS
-#define TCR_MTE_FLAGS SYS_TCR_EL1_TCMA1 | TCR_TBI1 | TCR_TBID1
+#define TCR_MTE_FLAGS TCR_TCMA1 | TCR_TBI1 | TCR_TBID1
 #else
 /*
  * The mte_zero_clear_page_tags() implementation uses DC GZVA, which relies on
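A minimal sketch of what the consolidated definitions compose to, assuming a 64-bit unsigned long as on arm64. UL() mirrors the kernel's <linux/const.h> helper, and the bit positions (TBI1 = 38, TBID1 = 52, TCMA1 = 58) are the architectural TCR_EL1 fields.

#include <stdio.h>

#define UL(x)		((unsigned long)(x))
#define TCR_TBI1	(UL(1) << 38)	/* top-byte ignore, TTBR1 */
#define TCR_TBID1	(UL(1) << 52)	/* TBI1 applies to data accesses only */
#define TCR_TCMA1	(UL(1) << 58)	/* tag-check match-all, TTBR1 */

/* Same composition as TCR_MTE_FLAGS in proc.S above. */
#define TCR_MTE_FLAGS	(TCR_TCMA1 | TCR_TBI1 | TCR_TBID1)

int main(void)
{
	printf("TCR_MTE_FLAGS = 0x%lx\n", TCR_MTE_FLAGS);
	return 0;
}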