Mirror of https://mirrors.bfsu.edu.cn/git/linux.git, synced 2024-12-03 09:04:21 +08:00
3335068f87
During the early page table creation, we used to set the mapping for PAGE_OFFSET to the kernel load address: but the kernel load address is always offset by PMD_SIZE, which makes it impossible to use PUD/P4D/PGD pages since this physical address is not aligned on PUD/P4D/PGD size (whereas PAGE_OFFSET is).

But actually we don't have to establish this mapping (i.e. set va_pa_offset) that early in the boot process, because:

- first, setup_vm installs a temporary kernel mapping and, among other things, discovers the system memory,
- then, setup_vm_final creates the final kernel mapping and takes advantage of the discovered system memory to create the linear mapping.

During the first phase, we don't know the start of the system memory, and until the second phase is finished, we can't use the linear mapping at all: phys_to_virt/virt_to_phys translations must not be used, because they would produce a different translation from the 'real' one once the final mapping is installed.

So here we simply delay the initialization of va_pa_offset until after the system memory discovery. But to make sure no one uses the linear mapping before then, we add some guards under the DEBUG_VIRTUAL config.

Finally, we can use PUD/P4D/PGD hugepages when possible, which results in better TLB utilization.

Note that:
- this does not apply to rv32, as the kernel mapping lies in the linear mapping there;
- we rely on the firmware to protect itself using PMP.

Signed-off-by: Alexandre Ghiti <alexghiti@rivosinc.com>
Acked-by: Rob Herring <robh@kernel.org> # DT bits
Reviewed-by: Andrew Jones <ajones@ventanamicro.com>
Reviewed-by: Anup Patel <anup@brainfault.org>
Tested-by: Anup Patel <anup@brainfault.org>
Link: https://lore.kernel.org/r/20230324155421.271544-4-alexghiti@rivosinc.com
Signed-off-by: Palmer Dabbelt <palmer@rivosinc.com>
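To illustrate the hugepage point above, here is a minimal, self-contained sketch (not the kernel's code) of the alignment rule that decides which page size the linear mapping can use: a mapping of size S is only possible when the physical address, the virtual address and the remaining length are all S-aligned. The helper name best_map_size_demo, the DEMO_* size constants and the sample addresses are illustrative assumptions, not taken from the patch.

/* Hedged sketch: choose the largest page size whose alignment constraints hold. */
#include <stdio.h>

#define DEMO_PAGE_SIZE (1ULL << 12) /* 4 KiB   */
#define DEMO_PMD_SIZE  (1ULL << 21) /* 2 MiB   */
#define DEMO_PUD_SIZE  (1ULL << 30) /* 1 GiB   */
#define DEMO_P4D_SIZE  (1ULL << 39) /* 512 GiB */

static unsigned long long best_map_size_demo(unsigned long long pa,
					     unsigned long long va,
					     unsigned long long size)
{
	if (!((pa | va) & (DEMO_P4D_SIZE - 1)) && size >= DEMO_P4D_SIZE)
		return DEMO_P4D_SIZE;
	if (!((pa | va) & (DEMO_PUD_SIZE - 1)) && size >= DEMO_PUD_SIZE)
		return DEMO_PUD_SIZE;
	if (!((pa | va) & (DEMO_PMD_SIZE - 1)) && size >= DEMO_PMD_SIZE)
		return DEMO_PMD_SIZE;
	return DEMO_PAGE_SIZE;
}

int main(void)
{
	/*
	 * Illustrative addresses: a kernel load address offset by PMD_SIZE
	 * only allows 2 MiB pages, while a 1 GiB-aligned start of system
	 * memory allows 1 GiB pages for the same virtual address and length.
	 */
	printf("old scheme: %#llx\n",
	       best_map_size_demo(0x80200000ULL, 0xffffffd800000000ULL, 1ULL << 32));
	printf("new scheme: %#llx\n",
	       best_map_size_demo(0x80000000ULL, 0xffffffd800000000ULL, 1ULL << 32));
	return 0;
}

The real selection also depends on which page-table levels are enabled (Sv48/Sv57), which this sketch leaves out.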
52 lines
1.3 KiB
C
// SPDX-License-Identifier: GPL-2.0

#include <linux/types.h>
#include <linux/mmdebug.h>
#include <linux/mm.h>
#include <asm/page.h>
#include <asm/sections.h>

phys_addr_t __virt_to_phys(unsigned long x)
{
	/*
	 * Boundary checking against the kernel linear mapping space.
	 */
	WARN(!is_linear_mapping(x) && !is_kernel_mapping(x),
	     "virt_to_phys used for non-linear address: %pK (%pS)\n",
	     (void *)x, (void *)x);

	return __va_to_pa_nodebug(x);
}
EXPORT_SYMBOL(__virt_to_phys);

phys_addr_t __phys_addr_symbol(unsigned long x)
{
	unsigned long kernel_start = kernel_map.virt_addr;
	unsigned long kernel_end = kernel_start + kernel_map.size;

	/*
	 * Boundary checking against the kernel image mapping.
	 * __pa_symbol should only be used on kernel symbol addresses.
	 */
	VIRTUAL_BUG_ON(x < kernel_start || x > kernel_end);

	return __va_to_pa_nodebug(x);
}
EXPORT_SYMBOL(__phys_addr_symbol);

phys_addr_t linear_mapping_va_to_pa(unsigned long x)
{
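	/*
	 * kernel_map.va_pa_offset is only initialized once the start of
	 * system memory is known, so any earlier use of the linear mapping
	 * is a bug (caught here under DEBUG_VIRTUAL).
	 */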
	BUG_ON(!kernel_map.va_pa_offset);

	return ((unsigned long)(x) - kernel_map.va_pa_offset);
}
EXPORT_SYMBOL(linear_mapping_va_to_pa);

void *linear_mapping_pa_to_va(unsigned long x)
{
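	/* Same guard: the linear mapping is unusable until va_pa_offset is set. */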
	BUG_ON(!kernel_map.va_pa_offset);

	return ((void *)((unsigned long)(x) + kernel_map.va_pa_offset));
}
EXPORT_SYMBOL(linear_mapping_pa_to_va);