mirror of https://mirrors.bfsu.edu.cn/git/linux.git
synced 2024-11-16 16:54:20 +08:00
d6182fbf04
Because the x86_64 architecture does not enforce segment limits, Xen cannot protect itself with them as it does in 32-bit mode. Therefore, to protect itself, it runs the guest kernel in ring 3. Since it also runs the guest userspace in ring 3, the guest kernel must maintain a second pagetable for its userspace, which does not map kernel space. Naturally, the guest kernel pagetables map both kernel and userspace.

The userspace pagetable is attached to the corresponding kernel pagetable via the pgd's page->private field. It is allocated and freed at the same time as the kernel pgd via the paravirt_pgd_alloc/free hooks.

Fortunately, the user pagetable is almost entirely shared with the kernel pagetable; the only difference is the pgd page itself. set_pgd will populate all entries in the kernel pagetable, and also set the corresponding user pgd entry if the address is less than STACK_TOP_MAX.

The user pagetable must be pinned and unpinned along with the kernel one, but because the pagetables are aliased, pgd_walk() only needs to be called on the kernel pagetable. The user pgd page is then pinned/unpinned along with the kernel pgd page.

xen_write_cr3 must write both the kernel and user cr3s.

The init_mm.pgd pagetable never has a user pagetable allocated for it, because it can never be used while running usermode.

One awkward area is that early in boot the page structures are not yet available. No user pagetable can exist at that point, but avoiding any reference to the page structures complicates the logic.

Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Cc: Stephen Tweedie <sct@redhat.com>
Cc: Eduardo Habkost <ehabkost@redhat.com>
Cc: Mark McLoughlin <markmc@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
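The pgd aliasing scheme described above is easier to see in code. The sketch below is only an illustration of the idea, not the code in arch/x86/xen/mmu.c: it assumes ordinary kernel context (struct page, virt_to_page(), page_private(), pgd_index(), STACK_TOP_MAX), uses made-up function names, and leaves out the multicall batching and hypercalls that the real set_pgd path goes through.

/*
 * Illustrative sketch only -- not the arch/x86/xen/mmu.c implementation.
 * The real code issues hypercalls instead of plain stores.
 */
#include <linux/mm.h>
#include <asm/pgtable.h>
#include <asm/processor.h>

/* Find the user-pagetable entry shadowing a kernel pgd entry, if any.
 * The user pgd page is hung off the kernel pgd page's ->private field
 * when the pgd is allocated (paravirt_pgd_alloc hook). */
static pgd_t *user_pgd_entry(pgd_t *pgdp)
{
        pgd_t *pgd_page = (pgd_t *)((unsigned long)pgdp & PAGE_MASK);
        unsigned long index = pgdp - pgd_page;
        pgd_t *user_pgd = (pgd_t *)page_private(virt_to_page(pgd_page));

        /* Only entries covering user-visible addresses are mirrored. */
        if (user_pgd && index < pgd_index(STACK_TOP_MAX))
                return user_pgd + index;
        return NULL;
}

/* Writing a kernel pgd entry also updates its user shadow, so the two
 * pagetables stay aliased everywhere below the pgd page itself. */
static void sketch_set_pgd(pgd_t *pgdp, pgd_t val)
{
        pgd_t *user_ptep = user_pgd_entry(pgdp);

        *pgdp = val;            /* the real code goes via a hypercall */
        if (user_ptep)
                *user_ptep = val;
}

This sharing is also why pinning only needs pgd_walk() on the kernel pagetable: everything below the pgd level is common to both, so the user pgd page is simply pinned and unpinned alongside the kernel pgd page.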
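The dual cr3 write can be pictured the same way. Again a rough sketch under the same assumptions; load_kernel_cr3() and load_user_cr3() are hypothetical stand-ins for the hypercalls the real xen_write_cr3 issues, and the early-boot case (no struct page available yet, hence no user pagetable) is ignored.

/* Hypothetical helpers standing in for the Xen hypercalls that load
 * the kernel and user pagetable base pointers. */
static void load_kernel_cr3(unsigned long pa);
static void load_user_cr3(unsigned long pa);

/* Sketch of the dual cr3 update on context switch. */
static void sketch_write_cr3(unsigned long cr3)
{
        pgd_t *kernel_pgd = __va(cr3);
        pgd_t *user_pgd = (pgd_t *)page_private(virt_to_page(kernel_pgd));

        load_kernel_cr3(cr3);                           /* guest-kernel pagetable */
        load_user_cr3(user_pgd ? __pa(user_pgd) : 0);   /* user pagetable, if any */
}

A pagetable with no user counterpart, such as init_mm.pgd, simply loads a null user cr3, since it can never be active while running usermode.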
61 lines
1.6 KiB
C
#ifndef _XEN_MMU_H

#include <linux/linkage.h>
#include <asm/page.h>

enum pt_level {
        PT_PGD,
        PT_PUD,
        PT_PMD,
        PT_PTE
};


void set_pte_mfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags);


void xen_activate_mm(struct mm_struct *prev, struct mm_struct *next);
void xen_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm);
void xen_exit_mmap(struct mm_struct *mm);

void xen_pgd_pin(pgd_t *pgd);
//void xen_pgd_unpin(pgd_t *pgd);

pteval_t xen_pte_val(pte_t);
pmdval_t xen_pmd_val(pmd_t);
pgdval_t xen_pgd_val(pgd_t);

pte_t xen_make_pte(pteval_t);
pmd_t xen_make_pmd(pmdval_t);
pgd_t xen_make_pgd(pgdval_t);

void xen_set_pte(pte_t *ptep, pte_t pteval);
void xen_set_pte_at(struct mm_struct *mm, unsigned long addr,
                    pte_t *ptep, pte_t pteval);

#ifdef CONFIG_X86_PAE
void xen_set_pte_atomic(pte_t *ptep, pte_t pte);
void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
void xen_pmd_clear(pmd_t *pmdp);
#endif  /* CONFIG_X86_PAE */

void xen_set_pmd(pmd_t *pmdp, pmd_t pmdval);
void xen_set_pud(pud_t *ptr, pud_t val);
void xen_set_pmd_hyper(pmd_t *pmdp, pmd_t pmdval);
void xen_set_pud_hyper(pud_t *ptr, pud_t val);

#if PAGETABLE_LEVELS == 4
pudval_t xen_pud_val(pud_t pud);
pud_t xen_make_pud(pudval_t pudval);
void xen_set_pgd(pgd_t *pgdp, pgd_t pgd);
void xen_set_pgd_hyper(pgd_t *pgdp, pgd_t pgd);
#endif

pgd_t *xen_get_user_pgd(pgd_t *pgd);

pte_t xen_ptep_modify_prot_start(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
void xen_ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
                                 pte_t *ptep, pte_t pte);

#endif  /* _XEN_MMU_H */