mirror of
https://mirrors.bfsu.edu.cn/git/linux.git
synced 2024-11-16 08:44:21 +08:00
ae2de10173
With LPAE, the pgd is a separate page table with entries pointing to the pmd. The identity_mapping_add() function needs to ensure that the pgd is populated before populating the pmd level. The do..while blocks now loop over the pmd in order to have the same implementation for the two page table formats. The pmd_addr_end() definition has been removed and the generic one used instead. The pmd clean-up is done in the pgd_free() function. Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
114 lines
2.7 KiB
C
114 lines
2.7 KiB
C
#include <linux/kernel.h>
|
|
|
|
#include <asm/cputype.h>
|
|
#include <asm/idmap.h>
|
|
#include <asm/pgalloc.h>
|
|
#include <asm/pgtable.h>
|
|
#include <asm/sections.h>
|
|
|
|
pgd_t *idmap_pgd;
|
|
|
|
#ifdef CONFIG_ARM_LPAE
|
|
/*
 * Populate the pmd entries covering [addr, end) with 1:1 section
 * mappings carrying the given section protection bits (LPAE variant).
 *
 * With LPAE the pgd is a separate table whose entries point at pmd
 * pages, so the pmd level may not exist yet for this range: if the pud
 * slot is empty, bad, or still marked L_PGD_SWAPPER (presumably an
 * entry inherited from swapper_pg_dir — confirm against pgd_alloc), a
 * fresh pmd page is allocated and hooked in before any entry is
 * written.
 *
 * Allocation failure is reported with a warning and the range is left
 * unmapped; callers get no error return.
 */
static void idmap_add_pmd(pud_t *pud, unsigned long addr, unsigned long end,
	unsigned long prot)
{
	pmd_t *pmd;
	unsigned long next;

	if (pud_none_or_clear_bad(pud) || (pud_val(*pud) & L_PGD_SWAPPER)) {
		pmd = pmd_alloc_one(&init_mm, addr);
		if (!pmd) {
			pr_warning("Failed to allocate identity pmd.\n");
			return;
		}
		/* Hook the new pmd page into the pud, then point at the
		 * entry for addr within it. */
		pud_populate(&init_mm, pud, pmd);
		pmd += pmd_index(addr);
	} else
		pmd = pmd_offset(pud, addr);

	do {
		next = pmd_addr_end(addr, end);
		/* Write a section entry and flush it so the walk sees it. */
		*pmd = __pmd((addr & PMD_MASK) | prot);
		flush_pmd_entry(pmd);
	} while (pmd++, addr = next, addr != end);
}
|
|
#else /* !CONFIG_ARM_LPAE */
|
|
/*
 * Populate the pmd covering addr with 1:1 section mappings carrying the
 * given section protection bits (classic, non-LPAE variant).
 *
 * Writes the two consecutive hardware section entries that make up one
 * Linux pmd slot, then flushes the entry.  The end argument is unused
 * here; it exists to match the LPAE variant's signature.
 */
static void idmap_add_pmd(pud_t *pud, unsigned long addr, unsigned long end,
	unsigned long prot)
{
	pmd_t *pmd = pmd_offset(pud, addr);
	unsigned long base = (addr & PMD_MASK) | prot;

	pmd[0] = __pmd(base);
	pmd[1] = __pmd(base + SECTION_SIZE);
	flush_pmd_entry(pmd);
}
|
|
#endif /* CONFIG_ARM_LPAE */
|
|
|
|
/*
 * Walk the pud entries covering [addr, end) and hand each sub-range
 * down to idmap_add_pmd() with the given protection bits.
 */
static void idmap_add_pud(pgd_t *pgd, unsigned long addr, unsigned long end,
	unsigned long prot)
{
	pud_t *pud = pud_offset(pgd, addr);

	do {
		unsigned long next = pud_addr_end(addr, end);

		idmap_add_pmd(pud, addr, next, prot);
		pud++;
		addr = next;
	} while (addr != end);
}
|
|
|
|
/*
 * Install a 1:1 (virtual == physical) section mapping for [addr, end)
 * into the given pgd, walking one pgd entry at a time.
 *
 * The section is mapped writable; on pre-ARMv6 cores other than XScale
 * the PMD_BIT4 encoding is also required in the descriptor.
 */
static void identity_mapping_add(pgd_t *pgd, unsigned long addr, unsigned long end)
{
	unsigned long prot = PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_AF;

	if (cpu_architecture() <= CPU_ARCH_ARMv5TEJ && !cpu_is_xscale())
		prot |= PMD_BIT4;

	pgd += pgd_index(addr);
	do {
		unsigned long next = pgd_addr_end(addr, end);

		idmap_add_pud(pgd, addr, next, prot);
		pgd++;
		addr = next;
	} while (addr != end);
}
|
|
|
|
extern char __idmap_text_start[], __idmap_text_end[];
|
|
|
|
/*
 * Allocate idmap_pgd and populate it with an identity mapping of the
 * .idmap.text section, so code there can run with the MMU being turned
 * off or on.  Runs once at early-initcall time.
 *
 * Returns 0 on success, -ENOMEM if the pgd cannot be allocated.
 */
static int __init init_static_idmap(void)
{
	phys_addr_t idmap_start = virt_to_phys((void *)__idmap_text_start);
	phys_addr_t idmap_end = virt_to_phys((void *)__idmap_text_end);

	idmap_pgd = pgd_alloc(&init_mm);
	if (!idmap_pgd)
		return -ENOMEM;

	pr_info("Setting up static identity map for 0x%llx - 0x%llx\n",
		(long long)idmap_start, (long long)idmap_end);

	/* Map the physical extent of the section onto itself. */
	identity_mapping_add(idmap_pgd, idmap_start, idmap_end);

	return 0;
}
early_initcall(init_static_idmap);
|
|
|
|
/*
|
|
* In order to soft-boot, we need to switch to a 1:1 mapping for the
|
|
* cpu_reset functions. This will then ensure that we have predictable
|
|
* results when turning off the mmu.
|
|
*/
|
|
/*
 * Switch the current CPU onto the static identity mapping in
 * preparation for a reboot/soft-boot, so that subsequent MMU-off
 * transitions behave predictably.  The ordering below is deliberate:
 * caches are cleaned before the switch, and the TLB is flushed after.
 */
void setup_mm_for_reboot(void)
{
	/* Clean and invalidate L1. */
	flush_cache_all();

	/* Switch to the identity mapping. */
	cpu_switch_mm(idmap_pgd, &init_mm);

	/* Flush the TLB. */
	local_flush_tlb_all();
}
|