mirror of
https://mirrors.bfsu.edu.cn/git/linux.git
synced 2025-01-25 07:14:36 +08:00
34bfeea4a9
Pages allocated by the kernel are not guaranteed to have the tags zeroed, especially as the kernel does not (yet) use MTE itself. To ensure the user can still access such pages when mapped into its address space, clear the tags via set_pte_at(). A new page flag - PG_mte_tagged (PG_arch_2) - is used to track pages with valid allocation tags. Since the zero page is mapped as pte_special(), it won't be covered by the above set_pte_at() mechanism. Clear its tags during early MTE initialisation. Co-developed-by: Steven Price <steven.price@arm.com> Signed-off-by: Steven Price <steven.price@arm.com> Signed-off-by: Catalin Marinas <catalin.marinas@arm.com> Cc: Will Deacon <will@kernel.org>
36 lines
775 B
C
36 lines
775 B
C
// SPDX-License-Identifier: GPL-2.0-only
|
|
/*
|
|
* Copyright (C) 2020 ARM Ltd.
|
|
*/
|
|
|
|
#include <linux/bitops.h>
|
|
#include <linux/mm.h>
|
|
#include <linux/thread_info.h>
|
|
|
|
#include <asm/cpufeature.h>
|
|
#include <asm/mte.h>
|
|
#include <asm/sysreg.h>
|
|
|
|
/*
 * Initialise the allocation tags for every page about to be mapped by
 * @pte, covering all sub-pages of a compound page.  Pages that already
 * have PG_mte_tagged set were tagged earlier and are left untouched;
 * test_and_set_bit() makes the check-and-mark step atomic so a page is
 * never cleared twice.
 */
void mte_sync_tags(pte_t *ptep, pte_t pte)
{
	struct page *page = pte_page(pte);
	long n = compound_nr(page);

	while (n--) {
		/* PG_mte_tagged set => tags already initialised; skip */
		if (!test_and_set_bit(PG_mte_tagged, &page->flags))
			mte_clear_page_tags(page_address(page));
		page++;
	}
}
|
|
|
|
void flush_mte_state(void)
|
|
{
|
|
if (!system_supports_mte())
|
|
return;
|
|
|
|
/* clear any pending asynchronous tag fault */
|
|
dsb(ish);
|
|
write_sysreg_s(0, SYS_TFSRE0_EL1);
|
|
clear_thread_flag(TIF_MTE_ASYNC_FAULT);
|
|
}
|