From e921da6bc7cac5f0e8458fe5df18ae08eb538f54 Mon Sep 17 00:00:00 2001
From: Anshuman Khandual
Date: Tue, 25 Jan 2022 20:08:33 +0530
Subject: [PATCH 1/7] arm64/mm: Consolidate TCR_EL1 fields

This renames and moves the SYS_TCR_EL1_TCMA1 and SYS_TCR_EL1_TCMA0
definitions into pgtable-hwdef.h, thus consolidating all TCR fields in a
single header. This does not cause any functional change.

Cc: Catalin Marinas
Cc: Will Deacon
Cc: linux-arm-kernel@lists.infradead.org
Cc: linux-kernel@vger.kernel.org
Acked-by: Catalin Marinas
Signed-off-by: Anshuman Khandual
Link: https://lore.kernel.org/r/1643121513-21854-1-git-send-email-anshuman.khandual@arm.com
Signed-off-by: Will Deacon
---
 arch/arm64/include/asm/pgtable-hwdef.h | 2 ++
 arch/arm64/include/asm/sysreg.h        | 4 ----
 arch/arm64/mm/proc.S                   | 2 +-
 3 files changed, 3 insertions(+), 5 deletions(-)

diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h
index 40085e53f573..66671ff05183 100644
--- a/arch/arm64/include/asm/pgtable-hwdef.h
+++ b/arch/arm64/include/asm/pgtable-hwdef.h
@@ -273,6 +273,8 @@
 #define TCR_NFD1        (UL(1) << 54)
 #define TCR_E0PD0       (UL(1) << 55)
 #define TCR_E0PD1       (UL(1) << 56)
+#define TCR_TCMA0       (UL(1) << 57)
+#define TCR_TCMA1       (UL(1) << 58)
 
 /*
  * TTBR.
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index 898bee0004ae..34800d264f69 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -1101,10 +1101,6 @@
 #define CPACR_EL1_ZEN_EL0EN     (BIT(17)) /* enable EL0 access, if EL1EN set */
 #define CPACR_EL1_ZEN           (CPACR_EL1_ZEN_EL1EN | CPACR_EL1_ZEN_EL0EN)
 
-/* TCR EL1 Bit Definitions */
-#define SYS_TCR_EL1_TCMA1       (BIT(58))
-#define SYS_TCR_EL1_TCMA0       (BIT(57))
-
 /* GCR_EL1 Definitions */
 #define SYS_GCR_EL1_RRND        (BIT(16))
 #define SYS_GCR_EL1_EXCL_MASK   0xffffUL
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index d35c90d2e47a..50bbed947bec 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -46,7 +46,7 @@
 #endif
 
 #ifdef CONFIG_KASAN_HW_TAGS
-#define TCR_MTE_FLAGS SYS_TCR_EL1_TCMA1 | TCR_TBI1 | TCR_TBID1
+#define TCR_MTE_FLAGS TCR_TCMA1 | TCR_TBI1 | TCR_TBID1
 #else
 /*
  * The mte_zero_clear_page_tags() implementation uses DC GZVA, which relies on
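
All of the TCR_EL1 fields consolidated above are single shifted bits, so a
TCR value is simply the OR of the fields in use. Below is a minimal
user-space sketch of how the renamed TCR_TCMA1 composes into TCR_MTE_FLAGS,
as in the proc.S hunk above. The mirrored #defines are illustrative
stand-ins for the kernel headers (TBI1 is bit 38 and TBID1 bit 52 per the
architecture), not code from this series:

  #include <stdio.h>
  #include <inttypes.h>

  /* Illustrative mirrors of the pgtable-hwdef.h TCR field definitions. */
  #define TCR_TBI1    (UINT64_C(1) << 38)
  #define TCR_TBID1   (UINT64_C(1) << 52)
  #define TCR_TCMA0   (UINT64_C(1) << 57)
  #define TCR_TCMA1   (UINT64_C(1) << 58)

  int main(void)
  {
          /* TCR_MTE_FLAGS as composed in proc.S after the rename. */
          uint64_t tcr_mte_flags = TCR_TCMA1 | TCR_TBI1 | TCR_TBID1;

          printf("TCR_MTE_FLAGS = 0x%016" PRIx64 "\n", tcr_mte_flags);
          return 0;
  }
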
From ee017ee353506fcec58e481673e4331ff198a80e Mon Sep 17 00:00:00 2001
From: Jianyong Wu
Date: Tue, 1 Feb 2022 19:44:00 +0800
Subject: [PATCH 2/7] arm64/mm: avoid fixmap race condition when creating pud mappings

The fixmap is a global resource and is used recursively while creating
pud mappings, leading to a potential race condition when alloc_init_pud()
is called concurrently:

  kernel_init thread                    virtio-mem workqueue thread
  ==================                    ===========================
  alloc_init_pud(...)                   alloc_init_pud(...)
  pudp = pud_set_fixmap_offset(...)     pudp = pud_set_fixmap_offset(...)
  READ_ONCE(*pudp)
  pud_clear_fixmap(...)
                                        READ_ONCE(*pudp) // CRASH!

Since the kernel may sleep while creating a pud mapping, introduce a
mutex to serialise use of the fixmap entries by alloc_init_pud(). There
is no need for locking during early boot, however, and the mutex does
not work as expected there when KASLR is enabled, so only take the lock
once system_state is no longer SYSTEM_BOOTING.

Signed-off-by: Jianyong Wu
Reviewed-by: Catalin Marinas
Fixes: f4710445458c ("arm64: mm: use fixmap when creating page tables")
Link: https://lore.kernel.org/r/20220201114400.56885-1-jianyong.wu@arm.com
Signed-off-by: Will Deacon
---
 arch/arm64/mm/mmu.c | 9 +++++++++
 1 file changed, 9 insertions(+)

diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index acfae9b41cc8..1681430ecab7 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -63,6 +63,7 @@ static pmd_t bm_pmd[PTRS_PER_PMD] __page_aligned_bss __maybe_unused;
 static pud_t bm_pud[PTRS_PER_PUD] __page_aligned_bss __maybe_unused;
 
 static DEFINE_SPINLOCK(swapper_pgdir_lock);
+static DEFINE_MUTEX(fixmap_lock);
 
 void set_swapper_pgd(pgd_t *pgdp, pgd_t pgd)
 {
@@ -329,6 +330,12 @@ static void alloc_init_pud(pgd_t *pgdp, unsigned long addr, unsigned long end,
        }
        BUG_ON(p4d_bad(p4d));
 
+       /*
+        * No need for locking during early boot. And it doesn't work as
+        * expected with KASLR enabled.
+        */
+       if (system_state != SYSTEM_BOOTING)
+               mutex_lock(&fixmap_lock);
        pudp = pud_set_fixmap_offset(p4dp, addr);
        do {
                pud_t old_pud = READ_ONCE(*pudp);
@@ -359,6 +366,8 @@ static void alloc_init_pud(pgd_t *pgdp, unsigned long addr, unsigned long end,
        } while (pudp++, addr = next, addr != end);
 
        pud_clear_fixmap();
+       if (system_state != SYSTEM_BOOTING)
+               mutex_unlock(&fixmap_lock);
 }
 
 static void __create_pgd_mapping(pgd_t *pgdir, phys_addr_t phys,
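
The design choice above is worth spelling out: a mutex (rather than a
spinlock) is used because page-table allocation on this path may sleep, and
the lock is skipped during early boot, which is single-threaded and where
the mutex also misbehaves under KASLR. A reduced pthread sketch of the
pattern follows; with_fixmap() and writer() are hypothetical stand-ins for
the fixmap map/use/unmap sequence, not kernel code:

  #include <pthread.h>
  #include <stdbool.h>
  #include <stdio.h>
  #include <string.h>

  /* Illustrative stand-ins for system_state and the new fixmap_lock. */
  static bool booting;                  /* true during single-threaded early boot */
  static pthread_mutex_t fixmap_lock = PTHREAD_MUTEX_INITIALIZER;
  static char fixmap_slot[64];          /* the single, global mapping slot */

  static void with_fixmap(void (*body)(char *slot))
  {
          /*
           * Early boot is single-threaded, so the lock is skipped there;
           * this mirrors the system_state != SYSTEM_BOOTING checks.
           */
          if (!booting)
                  pthread_mutex_lock(&fixmap_lock);

          body(fixmap_slot);            /* map, read/modify, unmap */

          if (!booting)
                  pthread_mutex_unlock(&fixmap_lock);
  }

  static void writer(char *slot)
  {
          strcpy(slot, "pud table window");
  }

  int main(void)
  {
          booting = false;              /* past early boot: serialise via the mutex */
          with_fixmap(writer);
          printf("%s\n", fixmap_slot);
          return 0;
  }
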
From a8a733b20109fc85a5b2e0318cef036b2c818ac3 Mon Sep 17 00:00:00 2001
From: Anshuman Khandual
Date: Thu, 17 Feb 2022 10:22:37 +0530
Subject: [PATCH 3/7] arm64/hugetlb: Define __hugetlb_valid_size()

The size-validation switch in arch_hugetlb_valid_size() can be factored
out into a helper that arch_hugetlb_migration_supported() can use as
well. This defines __hugetlb_valid_size() for that purpose.

Cc: Catalin Marinas
Cc: Will Deacon
Cc: linux-arm-kernel@lists.infradead.org
Cc: linux-kernel@vger.kernel.org
Reviewed-by: Catalin Marinas
Signed-off-by: Anshuman Khandual
Link: https://lore.kernel.org/r/1645073557-6150-1-git-send-email-anshuman.khandual@arm.com
Signed-off-by: Will Deacon
---
 arch/arm64/mm/hugetlbpage.c | 46 ++++++++++++++++++-------------------
 1 file changed, 22 insertions(+), 24 deletions(-)

diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
index ffb9c229610a..a33aba91ad89 100644
--- a/arch/arm64/mm/hugetlbpage.c
+++ b/arch/arm64/mm/hugetlbpage.c
@@ -56,24 +56,33 @@ void __init arm64_hugetlb_cma_reserve(void)
 }
 #endif /* CONFIG_CMA */
 
+static bool __hugetlb_valid_size(unsigned long size)
+{
+       switch (size) {
+#ifndef __PAGETABLE_PMD_FOLDED
+       case PUD_SIZE:
+               return pud_sect_supported();
+#endif
+       case CONT_PMD_SIZE:
+       case PMD_SIZE:
+       case CONT_PTE_SIZE:
+               return true;
+       }
+
+       return false;
+}
+
 #ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
 bool arch_hugetlb_migration_supported(struct hstate *h)
 {
        size_t pagesize = huge_page_size(h);
 
-       switch (pagesize) {
-#ifndef __PAGETABLE_PMD_FOLDED
-       case PUD_SIZE:
-               return pud_sect_supported();
-#endif
-       case PMD_SIZE:
-       case CONT_PMD_SIZE:
-       case CONT_PTE_SIZE:
-               return true;
-       }
-       pr_warn("%s: unrecognized huge page size 0x%lx\n",
+       if (!__hugetlb_valid_size(pagesize)) {
+               pr_warn("%s: unrecognized huge page size 0x%lx\n",
                        __func__, pagesize);
-       return false;
+               return false;
+       }
+       return true;
 }
 #endif
 
@@ -506,16 +515,5 @@ arch_initcall(hugetlbpage_init);
 
 bool __init arch_hugetlb_valid_size(unsigned long size)
 {
-       switch (size) {
-#ifndef __PAGETABLE_PMD_FOLDED
-       case PUD_SIZE:
-               return pud_sect_supported();
-#endif
-       case CONT_PMD_SIZE:
-       case PMD_SIZE:
-       case CONT_PTE_SIZE:
-               return true;
-       }
-
-       return false;
+       return __hugetlb_valid_size(size);
 }
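
For a sense of which sizes __hugetlb_valid_size() accepts: with a 4K
translation granule, PUD_SIZE is 1GB, CONT_PMD_SIZE 32MB (16 contiguous
PMDs), PMD_SIZE 2MB and CONT_PTE_SIZE 64KB (16 contiguous PTEs). A
user-space sketch under that 4K-granule assumption, using hard-coded
illustrative constants rather than the kernel's derived ones:

  #include <stdbool.h>
  #include <stdio.h>

  /* Illustrative sizes for a 4K granule only; not the kernel constants. */
  #define PUD_SIZE        (1UL << 30)   /* 1GB  */
  #define CONT_PMD_SIZE   (1UL << 25)   /* 32MB */
  #define PMD_SIZE        (1UL << 21)   /* 2MB  */
  #define CONT_PTE_SIZE   (1UL << 16)   /* 64KB */

  static bool hugetlb_valid_size(unsigned long size)
  {
          switch (size) {
          case PUD_SIZE:        /* gated on pud_sect_supported() in the kernel */
          case CONT_PMD_SIZE:
          case PMD_SIZE:
          case CONT_PTE_SIZE:
                  return true;
          }
          return false;
  }

  int main(void)
  {
          printf("2MB valid: %d\n", hugetlb_valid_size(PMD_SIZE));      /* 1 */
          printf("4MB valid: %d\n", hugetlb_valid_size(2 * PMD_SIZE));  /* 0 */
          return 0;
  }
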
From 2369f171d5c5550b85ce96fd35d4438cf2e6b09e Mon Sep 17 00:00:00 2001
From: Huang Shijie
Date: Wed, 9 Feb 2022 09:26:42 +0000
Subject: [PATCH 4/7] arm64: crash_core: Export MODULES, VMALLOC, and VMEMMAP ranges

The following interrelated ranges are needed by the kdump crash tool:

        MODULES_VADDR ~ MODULES_END
        VMALLOC_START ~ VMALLOC_END
        VMEMMAP_START ~ VMEMMAP_END

Since these values change from release to release, it is preferable to
export them via vmcoreinfo rather than requiring the crash tool's code
to change frequently.

Signed-off-by: Huang Shijie
Link: https://lore.kernel.org/r/20220209092642.9181-1-shijie@os.amperecomputing.com
Signed-off-by: Will Deacon
---
 Documentation/admin-guide/kdump/vmcoreinfo.rst | 8 ++++++++
 arch/arm64/kernel/crash_core.c                 | 6 ++++++
 2 files changed, 14 insertions(+)

diff --git a/Documentation/admin-guide/kdump/vmcoreinfo.rst b/Documentation/admin-guide/kdump/vmcoreinfo.rst
index 3861a25faae1..a339af45a22e 100644
--- a/Documentation/admin-guide/kdump/vmcoreinfo.rst
+++ b/Documentation/admin-guide/kdump/vmcoreinfo.rst
@@ -494,6 +494,14 @@ architecture which is used to lookup the page-tables for the Virtual
 addresses in the higher VA range (refer to ARMv8 ARM document for
 more details).
 
+MODULES_VADDR|MODULES_END|VMALLOC_START|VMALLOC_END|VMEMMAP_START|VMEMMAP_END
+-------------
+
+Used to get the correct ranges:
+  MODULES_VADDR ~ MODULES_END-1 : Kernel module space.
+  VMALLOC_START ~ VMALLOC_END-1 : vmalloc() / ioremap() space.
+  VMEMMAP_START ~ VMEMMAP_END-1 : vmemmap region, used for struct page array.
+
 arm
 ===
 
diff --git a/arch/arm64/kernel/crash_core.c b/arch/arm64/kernel/crash_core.c
index 314391a156ee..2b65aae332ce 100644
--- a/arch/arm64/kernel/crash_core.c
+++ b/arch/arm64/kernel/crash_core.c
@@ -20,6 +20,12 @@ void arch_crash_save_vmcoreinfo(void)
 {
        VMCOREINFO_NUMBER(VA_BITS);
        /* Please note VMCOREINFO_NUMBER() uses "%d", not "%x" */
+       vmcoreinfo_append_str("NUMBER(MODULES_VADDR)=0x%lx\n", MODULES_VADDR);
+       vmcoreinfo_append_str("NUMBER(MODULES_END)=0x%lx\n", MODULES_END);
+       vmcoreinfo_append_str("NUMBER(VMALLOC_START)=0x%lx\n", VMALLOC_START);
+       vmcoreinfo_append_str("NUMBER(VMALLOC_END)=0x%lx\n", VMALLOC_END);
+       vmcoreinfo_append_str("NUMBER(VMEMMAP_START)=0x%lx\n", VMEMMAP_START);
+       vmcoreinfo_append_str("NUMBER(VMEMMAP_END)=0x%lx\n", VMEMMAP_END);
        vmcoreinfo_append_str("NUMBER(kimage_voffset)=0x%llx\n",
                              kimage_voffset);
        vmcoreinfo_append_str("NUMBER(PHYS_OFFSET)=0x%llx\n",
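
On the consumer side, these exports appear in the vmcoreinfo note as lines
of the form NUMBER(name)=0x<hex>. Below is a minimal sketch of how a
debugger might pull one of them back out; this is an assumption about a
consumer such as crash, not code from the crash utility itself, and the
sample address is made up:

  #include <stdio.h>
  #include <string.h>

  /*
   * Parse one "NUMBER(<name>)=0x<hex>" line, as emitted by
   * vmcoreinfo_append_str() in the patch above.
   */
  static int parse_vmcoreinfo_number(const char *line, const char *name,
                                     unsigned long long *val)
  {
          char key[64];

          snprintf(key, sizeof(key), "NUMBER(%s)=", name);
          if (strncmp(line, key, strlen(key)) != 0)
                  return -1;
          return sscanf(line + strlen(key), "%llx", val) == 1 ? 0 : -1;
  }

  int main(void)
  {
          unsigned long long modules_vaddr;
          const char *line = "NUMBER(MODULES_VADDR)=0xffff800008000000";

          if (!parse_vmcoreinfo_number(line, "MODULES_VADDR", &modules_vaddr))
                  printf("MODULES_VADDR = 0x%llx\n", modules_vaddr);
          return 0;
  }
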
From cf5a501d985ba1b6ace9b18c64346441819bffea Mon Sep 17 00:00:00 2001
From: Muchun Song
Date: Wed, 2 Mar 2022 16:46:23 +0800
Subject: [PATCH 5/7] arm64: avoid flushing icache multiple times on contiguous HugeTLB

When a contiguous HugeTLB page is mapped, set_pte_at() is called
CONT_PTES/CONT_PMDS times. If the page is executable,
__sync_icache_dcache() therefore flushes the caches multiple times to
ensure I-D cache coherency, even though the first flush already covers
the subsequent ones. So, for a HugeTLB page, flush only via the head
page to avoid the redundant flushing. The next patch also depends on
this change, since the tail vmemmap pages of a HugeTLB page are mapped
read-only there, meaning that only the head page's struct page can be
modified.

Signed-off-by: Muchun Song
Reviewed-by: Catalin Marinas
Link: https://lore.kernel.org/r/20220302084624.33340-1-songmuchun@bytedance.com
Signed-off-by: Will Deacon
---
 arch/arm64/mm/flush.c | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/arch/arm64/mm/flush.c b/arch/arm64/mm/flush.c
index 2aaf950b906c..a06c6ac770d4 100644
--- a/arch/arm64/mm/flush.c
+++ b/arch/arm64/mm/flush.c
@@ -52,6 +52,13 @@ void __sync_icache_dcache(pte_t pte)
 {
        struct page *page = pte_page(pte);
 
+       /*
+        * HugeTLB pages are always fully mapped, so only setting head page's
+        * PG_dcache_clean flag is enough.
+        */
+       if (PageHuge(page))
+               page = compound_head(page);
+
        if (!test_bit(PG_dcache_clean, &page->flags)) {
                sync_icache_aliases((unsigned long)page_address(page),
                                    (unsigned long)page_address(page) +

From 1310222c276b7946e440a7ab49c1e1508561f5fd Mon Sep 17 00:00:00 2001
From: Anshuman Khandual
Date: Wed, 16 Feb 2022 10:36:52 +0530
Subject: [PATCH 6/7] arm64/mm: Drop use_1G_block()

pud_sect_supported() already checks for PUD-level block mapping support,
i.e. it is only true on ARM64_4K_PAGES configurations. Hence
pud_sect_supported(), combined with the required alignment checks, allows
use_1G_block() to be dropped entirely.

Cc: Catalin Marinas
Cc: Will Deacon
Cc: linux-arm-kernel@lists.infradead.org
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Anshuman Khandual
Reviewed-by: Catalin Marinas
Link: https://lore.kernel.org/r/1644988012-25455-1-git-send-email-anshuman.khandual@arm.com
Signed-off-by: Will Deacon
---
 arch/arm64/mm/mmu.c | 15 ++-------------
 1 file changed, 2 insertions(+), 13 deletions(-)

diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 1681430ecab7..12feac701d8e 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -295,18 +295,6 @@ static void alloc_init_cont_pmd(pud_t *pudp, unsigned long addr,
        } while (addr = next, addr != end);
 }
 
-static inline bool use_1G_block(unsigned long addr, unsigned long next,
-                               unsigned long phys)
-{
-       if (PAGE_SHIFT != 12)
-               return false;
-
-       if (((addr | next | phys) & ~PUD_MASK) != 0)
-               return false;
-
-       return true;
-}
-
 static void alloc_init_pud(pgd_t *pgdp, unsigned long addr, unsigned long end,
                           phys_addr_t phys, pgprot_t prot,
                           phys_addr_t (*pgtable_alloc)(int),
@@ -345,7 +333,8 @@ static void alloc_init_pud(pgd_t *pgdp, unsigned long addr, unsigned long end,
                /*
                 * For 4K granule only, attempt to put down a 1GB block
                 */
-               if (use_1G_block(addr, next, phys) &&
+               if (pud_sect_supported() &&
+                  ((addr | next | phys) & ~PUD_MASK) == 0 &&
                    (flags & NO_BLOCK_MAPPINGS) == 0) {
                        pud_set_huge(pudp, phys, prot);
 

From 3c3dd2c81a1036c487353f7b9a485268b2fc1d7f Mon Sep 17 00:00:00 2001
From: Will Deacon
Date: Wed, 9 Mar 2022 12:16:33 +0000
Subject: [PATCH 7/7] Documentation: vmcoreinfo: Fix htmldocs warning

Since commit 2369f171d5c5 ("arm64: crash_core: Export MODULES, VMALLOC,
and VMEMMAP ranges"), Stephen reports a warning when building htmldocs:

| Documentation/admin-guide/kdump/vmcoreinfo.rst:498: WARNING: Title underline too short.

Extend the underline to squash the warning.

Fixes: 2369f171d5c5 ("arm64: crash_core: Export MODULES, VMALLOC, and VMEMMAP ranges")
Reported-by: Stephen Rothwell
Signed-off-by: Will Deacon
---
 Documentation/admin-guide/kdump/vmcoreinfo.rst | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/Documentation/admin-guide/kdump/vmcoreinfo.rst b/Documentation/admin-guide/kdump/vmcoreinfo.rst
index a339af45a22e..8419019b6a88 100644
--- a/Documentation/admin-guide/kdump/vmcoreinfo.rst
+++ b/Documentation/admin-guide/kdump/vmcoreinfo.rst
@@ -495,7 +495,7 @@ addresses in the higher VA range (refer to ARMv8 ARM document for
 more details).
 
 MODULES_VADDR|MODULES_END|VMALLOC_START|VMALLOC_END|VMEMMAP_START|VMEMMAP_END
--------------
+-----------------------------------------------------------------------------
 
 Used to get the correct ranges:
   MODULES_VADDR ~ MODULES_END-1 : Kernel module space.
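
A closing note on patch 6 above: the open-coded test
((addr | next | phys) & ~PUD_MASK) == 0 replaces use_1G_block()'s separate
checks by observing that if any of the three values has a bit set below
PUD_SIZE, that bit survives the OR and is caught by the mask, so the
expression is true only when the virtual range and the physical address are
all 1GB-aligned. A stand-alone sketch, assuming the 4K-granule value
PUD_SIZE = 1GB:

  #include <stdbool.h>
  #include <stdio.h>

  #define PUD_SIZE        (1UL << 30)           /* 1GB, 4K-granule assumption */
  #define PUD_MASK        (~(PUD_SIZE - 1))

  /* True only when addr, next and phys are all 1GB-aligned. */
  static bool block_aligned(unsigned long addr, unsigned long next,
                            unsigned long phys)
  {
          return ((addr | next | phys) & ~PUD_MASK) == 0;
  }

  int main(void)
  {
          printf("%d\n", block_aligned(0x40000000, 0x80000000, 0xc0000000)); /* 1 */
          printf("%d\n", block_aligned(0x40000000, 0x80000000, 0xc0200000)); /* 0 */
          return 0;
  }
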