3377e227af
Some users must have 4K pages while needing a 48-bit VA space size. The cleanest way to do this is to go to a 4-level page table for this case. Each page table level using order-0 pages adds 9 bits to the VA size (at 4K pages), so for four levels we get 9 * 4 + 12 == 48 bits. For the 4K page size case only, we add support functions for the PUD level of the page table tree; the TLB exception handlers also get an extra level of tree walk.

[david.daney@cavium.com: Forward port to v4.10.]
[david.daney@cavium.com: Forward port to v4.11.]

Signed-off-by: Alex Belits <alex.belits@cavium.com>
Signed-off-by: David Daney <david.daney@cavium.com>
Cc: James Hogan <james.hogan@imgtec.com>
Cc: Alex Belits <alex.belits@cavium.com>
Cc: linux-mips@linux-mips.org
Cc: linux-kernel@vger.kernel.org
Patchwork: https://patchwork.linux-mips.org/patch/15312/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
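A quick check of the arithmetic in the message above (a standalone sketch; PAGE_SHIFT and the 9-bit per-level width are hard-coded for the 4K-page, 8-byte-entry case):

#include <stdio.h>

/* With 4K pages the low 12 bits index bytes within a page, and each
 * order-0 table holds 4096 / 8 = 512 eight-byte entries, i.e. 9 bits. */
#define PAGE_SHIFT      12
#define BITS_PER_LEVEL  9

int main(void)
{
        for (int levels = 2; levels <= 4; levels++)
                printf("%d levels -> %2d-bit VA\n",
                       levels, levels * BITS_PER_LEVEL + PAGE_SHIFT);
        /* 4 levels -> 4 * 9 + 12 == 48 bits, as the commit message says. */
        return 0;
}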
127 lines | 2.5 KiB | C
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1999, 2000 by Silicon Graphics
 * Copyright (C) 2003 by Ralf Baechle
 */
#include <linux/export.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <asm/fixmap.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>

void pgd_init(unsigned long page)
{
        unsigned long *p, *end;
        unsigned long entry;

#if !defined(__PAGETABLE_PUD_FOLDED)
        entry = (unsigned long)invalid_pud_table;
#elif !defined(__PAGETABLE_PMD_FOLDED)
        entry = (unsigned long)invalid_pmd_table;
#else
        entry = (unsigned long)invalid_pte_table;
#endif

        p = (unsigned long *) page;
        end = p + PTRS_PER_PGD;

        do {
                p[0] = entry;
                p[1] = entry;
                p[2] = entry;
                p[3] = entry;
                p[4] = entry;
                p += 8;
                p[-3] = entry;
                p[-2] = entry;
                p[-1] = entry;
        } while (p != end);
}

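The do/while above is unrolled to write eight pgd entries per pass (indices 0-4, then 5-7 via negative offsets after the pointer bump), which assumes PTRS_PER_PGD is a multiple of 8. A minimal un-unrolled equivalent, as a standalone userspace sketch (PTRS stands in for PTRS_PER_PGD; names and values are hypothetical):

#include <stdio.h>

#define PTRS 512        /* stand-in for PTRS_PER_PGD (512 in the 4-level 4K case) */

/* Same effect as pgd_init()'s unrolled loop: point every slot at the
 * next level's "invalid" table (here just a dummy value). */
static void pgd_init_simple(unsigned long *table, unsigned long entry)
{
        for (int i = 0; i < PTRS; i++)
                table[i] = entry;
}

int main(void)
{
        static unsigned long pgdir[PTRS];

        pgd_init_simple(pgdir, 0x1000);
        printf("first %#lx last %#lx\n", pgdir[0], pgdir[PTRS - 1]);
        return 0;
}
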
#ifndef __PAGETABLE_PMD_FOLDED
void pmd_init(unsigned long addr, unsigned long pagetable)
{
        unsigned long *p, *end;

        p = (unsigned long *) addr;
        end = p + PTRS_PER_PMD;

        do {
                p[0] = pagetable;
                p[1] = pagetable;
                p[2] = pagetable;
                p[3] = pagetable;
                p[4] = pagetable;
                p += 8;
                p[-3] = pagetable;
                p[-2] = pagetable;
                p[-1] = pagetable;
        } while (p != end);
}
EXPORT_SYMBOL_GPL(pmd_init);
#endif

#ifndef __PAGETABLE_PUD_FOLDED
void pud_init(unsigned long addr, unsigned long pagetable)
{
        unsigned long *p, *end;

        p = (unsigned long *)addr;
        end = p + PTRS_PER_PUD;

        do {
                p[0] = pagetable;
                p[1] = pagetable;
                p[2] = pagetable;
                p[3] = pagetable;
                p[4] = pagetable;
                p += 8;
                p[-3] = pagetable;
                p[-2] = pagetable;
                p[-1] = pagetable;
        } while (p != end);
}
#endif

pmd_t mk_pmd(struct page *page, pgprot_t prot)
{
        pmd_t pmd;

        pmd_val(pmd) = (page_to_pfn(page) << _PFN_SHIFT) | pgprot_val(prot);

        return pmd;
}

void set_pmd_at(struct mm_struct *mm, unsigned long addr,
                pmd_t *pmdp, pmd_t pmd)
{
        *pmdp = pmd;
        flush_tlb_all();
}

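mk_pmd() and set_pmd_at() above support installing PMD-level (huge page) mappings; note that set_pmd_at() pays for the missing fine-grained invalidation with a full flush_tlb_all(). A minimal sketch of the calling pattern, assuming kernel context (the wrapper function itself is hypothetical):

/* Hypothetical caller: build a PMD from a page plus protection bits,
 * then install it; set_pmd_at() stores it and flushes the TLB. */
static void install_pmd_mapping(struct mm_struct *mm, unsigned long addr,
                                pmd_t *pmdp, struct page *page, pgprot_t prot)
{
        pmd_t pmd = mk_pmd(page, prot);

        set_pmd_at(mm, addr, pmdp, pmd);
}
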
void __init pagetable_init(void)
{
        unsigned long vaddr;
        pgd_t *pgd_base;

        /* Initialize the entire pgd. */
        pgd_init((unsigned long)swapper_pg_dir);
#ifndef __PAGETABLE_PUD_FOLDED
        pud_init((unsigned long)invalid_pud_table, (unsigned long)invalid_pmd_table);
#endif
#ifndef __PAGETABLE_PMD_FOLDED
        pmd_init((unsigned long)invalid_pmd_table, (unsigned long)invalid_pte_table);
#endif
        pgd_base = swapper_pg_dir;
        /*
         * Fixed mappings:
         */
        vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
        fixrange_init(vaddr, vaddr + FIXADDR_SIZE, pgd_base);
}
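
For reference, a userspace sketch (all names hypothetical) of the empty-tree wiring that pagetable_init() sets up: every pgd slot points at invalid_pud_table, whose slots point at invalid_pmd_table, whose slots point at invalid_pte_table, so a page-table walk of any unmapped address terminates safely at the bottom level:

#include <stdio.h>

#define PTRS 512        /* entries per order-0 table with 4K pages */

static unsigned long invalid_pte[PTRS];
static unsigned long invalid_pmd[PTRS];
static unsigned long invalid_pud[PTRS];
static unsigned long pgdir[PTRS];

static void fill(unsigned long *table, unsigned long entry)
{
        for (int i = 0; i < PTRS; i++)
                table[i] = entry;
}

int main(void)
{
        fill(pgdir, (unsigned long)invalid_pud);        /* pgd_init() */
        fill(invalid_pud, (unsigned long)invalid_pmd);  /* pud_init() */
        fill(invalid_pmd, (unsigned long)invalid_pte);  /* pmd_init() */

        /* Walk one arbitrary index down the tree, as a refill would. */
        unsigned long *pud = (unsigned long *)pgdir[7];
        unsigned long *pmd = (unsigned long *)pud[7];

        printf("walk lands on invalid_pte: %s\n",
               (unsigned long *)pmd[7] == invalid_pte ? "yes" : "no");
        return 0;
}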