Mirror of https://mirrors.bfsu.edu.cn/git/linux.git (synced 2024-11-15 16:24:13 +08:00)
7af5b901e8
With LPAE enabled, privileged no-access cannot be enforced using CPU domains, as that feature is not available. This patch implements PAN by disabling TTBR0 page table walks while in kernel mode.

The ARM architecture allows page table walks to be split between TTBR0 and TTBR1. With LPAE enabled, the split is defined by a combination of the TTBCR T0SZ and T1SZ bits. Currently, an LPAE-enabled kernel uses TTBR0 for user addresses and TTBR1 for kernel addresses with the VMSPLIT_2G and VMSPLIT_3G configurations. The main advantage of the 3:1 split is that TTBR1 is reduced to 2 levels, so TLB refill is potentially faster (though usually the first-level entries are already cached in the TLB).

The PAN support on LPAE-enabled kernels uses TTBR0 when running in user space or in kernel space during user access routines (TTBCR T0SZ and T1SZ are both 0). When user accesses are disabled in kernel mode, TTBR0 page table walks are disabled by setting TTBCR.EPD0. TTBR1 is used for kernel accesses (including loadable modules; anything covered by swapper_pg_dir) by reducing the TTBR0 range to the minimum (T0SZ = 7, i.e. 2^(32-7) = 32MB). To avoid user accesses potentially hitting stale TLB entries, the ASID is switched to 0 (reserved) by setting TTBCR.A1 and using the ASID value in TTBR1. The difference from a non-PAN kernel is that with the 3:1 memory split, TTBR1 always uses 3 levels of page tables.

As part of the change we are using preprocessor #elif defined() clauses, so balance these clauses by converting the relevant preceding #ifdef clauses to #if defined() clauses.

Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Reviewed-by: Kees Cook <keescook@chromium.org>
Tested-by: Florian Fainelli <florian.fainelli@broadcom.com>
Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
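A minimal sketch of the TTBCR-based mechanism described above, assuming the ARMv7 LPAE bit layout (T0SZ[2:0], EPD0 bit 7, A1 bit 22). This is not the patch's actual uaccess implementation (which lives in the uaccess assembler macros and saves/restores the previous TTBCR value rather than clearing bits); the helper names are invented for illustration only.

/*
 * Hedged illustration only: not the kernel's implementation.
 * Helper names (ttbr0_uaccess_disable/enable) are invented.
 */
#define TTBCR_T0SZ_MASK	(7U << 0)	/* T0SZ = 7 -> TTBR0 range is 2^(32-7) = 32MB */
#define TTBCR_EPD0	(1U << 7)	/* disable TTBR0 page table walks */
#define TTBCR_A1	(1U << 22)	/* take the ASID from TTBR1 (reserved ASID 0) */

static inline unsigned int read_ttbcr(void)
{
	unsigned int val;

	asm volatile("mrc p15, 0, %0, c2, c0, 2" : "=r" (val));	/* read TTBCR */
	return val;
}

static inline void write_ttbcr(unsigned int val)
{
	asm volatile("mcr p15, 0, %0, c2, c0, 2" : : "r" (val));	/* write TTBCR */
	asm volatile("isb" ::: "memory");	/* make the new translation regime visible */
}

/* "PAN on": kernel-only translation regime, user (TTBR0) walks now fault */
static inline void ttbr0_uaccess_disable(void)
{
	write_ttbcr(read_ttbcr() | TTBCR_EPD0 | TTBCR_T0SZ_MASK | TTBCR_A1);
}

/* "PAN off": re-enable TTBR0 walks around the user access routines */
static inline void ttbr0_uaccess_enable(void)
{
	write_ttbcr(read_ttbcr() & ~(TTBCR_EPD0 | TTBCR_T0SZ_MASK | TTBCR_A1));
}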
131 lines
3.3 KiB
C
// SPDX-License-Identifier: GPL-2.0
#include <linux/ftrace.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/mm_types.h>
#include <linux/pgtable.h>

#include <asm/bugs.h>
#include <asm/cacheflush.h>
#include <asm/idmap.h>
#include <asm/page.h>
#include <asm/smp_plat.h>
#include <asm/suspend.h>
#include <asm/tlbflush.h>
#include <asm/uaccess.h>

extern int __cpu_suspend(unsigned long, int (*)(unsigned long), u32 cpuid);
extern void cpu_resume_mmu(void);

#ifdef CONFIG_MMU
int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
{
	struct mm_struct *mm = current->active_mm;
	u32 __mpidr = cpu_logical_map(smp_processor_id());
	int ret;

	if (!idmap_pgd)
		return -EINVAL;

	/*
	 * Needed for the MMU disabling/enabling code to be able to run from
	 * TTBR0 addresses.
	 */
	if (IS_ENABLED(CONFIG_CPU_TTBR0_PAN))
		uaccess_save_and_enable();

	/*
	 * Function graph tracer state gets inconsistent when the kernel
	 * calls functions that never return (aka suspend finishers) hence
	 * disable graph tracing during their execution.
	 */
	pause_graph_tracing();

	/*
	 * Provide a temporary page table with an identity mapping for
	 * the MMU-enable code, required for resuming. On successful
	 * resume (indicated by a zero return code), we need to switch
	 * back to the correct page tables.
	 */
	ret = __cpu_suspend(arg, fn, __mpidr);

	unpause_graph_tracing();

	if (ret == 0) {
		cpu_switch_mm(mm->pgd, mm);
		local_flush_bp_all();
		local_flush_tlb_all();
		check_other_bugs();
	}

	return ret;
}
#else
int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
{
	u32 __mpidr = cpu_logical_map(smp_processor_id());
	int ret;

	pause_graph_tracing();
	ret = __cpu_suspend(arg, fn, __mpidr);
	unpause_graph_tracing();

	return ret;
}
#define	idmap_pgd	NULL
#endif

/*
 * This is called by __cpu_suspend() to save the state, and do whatever
 * flushing is required to ensure that when the CPU goes to sleep we have
 * the necessary data available when the caches are not searched.
 */
void __cpu_suspend_save(u32 *ptr, u32 ptrsz, u32 sp, u32 *save_ptr)
{
	u32 *ctx = ptr;

	*save_ptr = virt_to_phys(ptr);

	/* This must correspond to the LDM in cpu_resume() assembly */
	*ptr++ = virt_to_phys(idmap_pgd);
	*ptr++ = sp;
	*ptr++ = virt_to_phys(cpu_do_resume);

	cpu_do_suspend(ptr);

	flush_cache_louis();

	/*
	 * flush_cache_louis does not guarantee that
	 * save_ptr and ptr are cleaned to main memory,
	 * just up to the Level of Unification Inner Shareable.
	 * Since the context pointer and context itself
	 * are to be retrieved with the MMU off that
	 * data must be cleaned from all cache levels
	 * to main memory using "area" cache primitives.
	 */
	__cpuc_flush_dcache_area(ctx, ptrsz);
	__cpuc_flush_dcache_area(save_ptr, sizeof(*save_ptr));

	outer_clean_range(*save_ptr, *save_ptr + ptrsz);
	outer_clean_range(virt_to_phys(save_ptr),
			  virt_to_phys(save_ptr) + sizeof(*save_ptr));
}

extern struct sleep_save_sp sleep_save_sp;

static int cpu_suspend_alloc_sp(void)
{
	void *ctx_ptr;
	/* ctx_ptr is an array of physical addresses */
	ctx_ptr = kcalloc(mpidr_hash_size(), sizeof(u32), GFP_KERNEL);

	if (WARN_ON(!ctx_ptr))
		return -ENOMEM;
	sleep_save_sp.save_ptr_stash = ctx_ptr;
	sleep_save_sp.save_ptr_stash_phys = virt_to_phys(ctx_ptr);
	sync_cache_w(&sleep_save_sp);
	return 0;
}
early_initcall(cpu_suspend_alloc_sp);
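For context, a hedged usage sketch (not part of this file) of how a platform PM path might drive cpu_suspend(). The my_soc_* names are hypothetical; a real driver would also program its power controller before stopping the CPU and would include <linux/suspend.h> for suspend_state_t. cpu_do_idle() is the existing ARM helper.

/*
 * Hypothetical finisher: runs after __cpu_suspend_save(), and must not
 * return if the power-down succeeds (the CPU comes back via cpu_resume()).
 */
static int my_soc_suspend_finisher(unsigned long arg)
{
	/* ... program the (hypothetical) power controller here ... */
	cpu_do_idle();	/* reached only if the power-down is aborted */
	return 1;	/* non-zero: the CPU never lost context */
}

static int my_soc_pm_enter(suspend_state_t state)
{
	/*
	 * 0 means we resumed through cpu_resume(); cpu_suspend() has already
	 * switched back to the correct page tables and flushed BP/TLB state.
	 */
	return cpu_suspend(0, my_soc_suspend_finisher);
}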