Mirror of https://github.com/edk2-porting/linux-next.git
x86/mm/pat: Ensure cpa->pfn only contains page frame numbers
The x86 pageattr code is confused about the data stored in cpa->pfn: sometimes it is treated as a page frame number, sometimes as an unshifted physical address, and in one place as a pte. The result is that the mapping functions do not map the intended physical address.

This isn't a problem in practice because most of the addresses we're mapping in the EFI code paths are already mapped in 'trampoline_pgd', so the pageattr mapping functions don't actually do anything in this case. But when we move to using a separate page table for the EFI runtime this will be an issue.

Signed-off-by: Matt Fleming <matt@codeblueprint.co.uk>
Reviewed-by: Borislav Petkov <bp@suse.de>
Acked-by: Borislav Petkov <bp@suse.de>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Sai Praneeth Prakhya <sai.praneeth.prakhya@intel.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Toshi Kani <toshi.kani@hp.com>
Cc: linux-efi@vger.kernel.org
Link: http://lkml.kernel.org/r/1448658575-17029-3-git-send-email-matt@codeblueprint.co.uk
Signed-off-by: Ingo Molnar <mingo@kernel.org>
commit edc3b9129c
parent 21cdb6b568
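For readers skimming the diff, the core distinction is that a page frame number (pfn) is a physical address shifted right by PAGE_SHIFT, so the two representations advance at different rates: the next page is phys + PAGE_SIZE but pfn + 1. The short userspace C sketch below (illustrative only, not kernel code; PAGE_SHIFT is assumed to be the usual x86 value of 12 for 4 KiB pages) shows why mixing the two, as the old "cpa->pfn += PAGE_SIZE" did, jumps 4096 page frames ahead instead of one.

/*
 * Standalone illustration (not kernel code) of pfn vs. physical-address
 * arithmetic; PAGE_SHIFT of 12 matches the x86 4 KiB page size.
 */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

int main(void)
{
	uint64_t phys = 0x1234000;             /* a physical address      */
	uint64_t pfn  = phys >> PAGE_SHIFT;    /* its page frame number   */

	uint64_t next_phys = phys + PAGE_SIZE; /* next page as an address */
	uint64_t next_pfn  = pfn + 1;          /* next page as a pfn      */

	/* Mixing the representations, as the old code did, advances by
	 * PAGE_SIZE (4096) page frames instead of one. */
	uint64_t bogus_pfn = pfn + PAGE_SIZE;

	printf("phys=%#llx pfn=%#llx\n",
	       (unsigned long long)phys, (unsigned long long)pfn);
	printf("next_phys=%#llx next_pfn=%#llx bogus_pfn=%#llx\n",
	       (unsigned long long)next_phys,
	       (unsigned long long)next_pfn,
	       (unsigned long long)bogus_pfn);
	return 0;
}

With that distinction in mind, the diff below makes cpa->pfn hold only page frame numbers and converts physical addresses to pfns at the kernel_map_pages_in_pgd() call sites.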
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -905,15 +905,10 @@ static void populate_pte(struct cpa_data *cpa,
 	pte = pte_offset_kernel(pmd, start);
 
 	while (num_pages-- && start < end) {
-
-		/* deal with the NX bit */
-		if (!(pgprot_val(pgprot) & _PAGE_NX))
-			cpa->pfn &= ~_PAGE_NX;
-
-		set_pte(pte, pfn_pte(cpa->pfn >> PAGE_SHIFT, pgprot));
+		set_pte(pte, pfn_pte(cpa->pfn, pgprot));
 
 		start    += PAGE_SIZE;
-		cpa->pfn += PAGE_SIZE;
+		cpa->pfn++;
 		pte++;
 	}
 }
@@ -969,11 +964,11 @@ static int populate_pmd(struct cpa_data *cpa,
 
 		pmd = pmd_offset(pud, start);
 
-		set_pmd(pmd, __pmd(cpa->pfn | _PAGE_PSE |
+		set_pmd(pmd, __pmd(cpa->pfn << PAGE_SHIFT | _PAGE_PSE |
 				   massage_pgprot(pmd_pgprot)));
 
 		start     += PMD_SIZE;
-		cpa->pfn  += PMD_SIZE;
+		cpa->pfn  += PMD_SIZE >> PAGE_SHIFT;
 		cur_pages += PMD_SIZE >> PAGE_SHIFT;
 	}
 
@@ -1042,11 +1037,11 @@ static int populate_pud(struct cpa_data *cpa, unsigned long start, pgd_t *pgd,
 	 * Map everything starting from the Gb boundary, possibly with 1G pages
 	 */
 	while (end - start >= PUD_SIZE) {
-		set_pud(pud, __pud(cpa->pfn | _PAGE_PSE |
+		set_pud(pud, __pud(cpa->pfn << PAGE_SHIFT | _PAGE_PSE |
 				   massage_pgprot(pud_pgprot)));
 
 		start     += PUD_SIZE;
-		cpa->pfn  += PUD_SIZE;
+		cpa->pfn  += PUD_SIZE >> PAGE_SHIFT;
 		cur_pages += PUD_SIZE >> PAGE_SHIFT;
 		pud++;
 	}
--- a/arch/x86/platform/efi/efi_64.c
+++ b/arch/x86/platform/efi/efi_64.c
@@ -143,7 +143,7 @@ void efi_sync_low_kernel_mappings(void)
 
 int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
 {
-	unsigned long text;
+	unsigned long pfn, text;
 	struct page *page;
 	unsigned npages;
 	pgd_t *pgd;
@@ -160,7 +160,8 @@ int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
 	 * and ident-map those pages containing the map before calling
 	 * phys_efi_set_virtual_address_map().
 	 */
-	if (kernel_map_pages_in_pgd(pgd, pa_memmap, pa_memmap, num_pages, _PAGE_NX)) {
+	pfn = pa_memmap >> PAGE_SHIFT;
+	if (kernel_map_pages_in_pgd(pgd, pfn, pa_memmap, num_pages, _PAGE_NX)) {
 		pr_err("Error ident-mapping new memmap (0x%lx)!\n", pa_memmap);
 		return 1;
 	}
@@ -185,8 +186,9 @@ int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
 
 	npages = (_end - _text) >> PAGE_SHIFT;
 	text = __pa(_text);
+	pfn = text >> PAGE_SHIFT;
 
-	if (kernel_map_pages_in_pgd(pgd, text >> PAGE_SHIFT, text, npages, 0)) {
+	if (kernel_map_pages_in_pgd(pgd, pfn, text, npages, 0)) {
 		pr_err("Failed to map kernel text 1:1\n");
 		return 1;
 	}
@@ -204,12 +206,14 @@ void __init efi_cleanup_page_tables(unsigned long pa_memmap, unsigned num_pages)
 static void __init __map_region(efi_memory_desc_t *md, u64 va)
 {
 	pgd_t *pgd = (pgd_t *)__va(real_mode_header->trampoline_pgd);
-	unsigned long pf = 0;
+	unsigned long flags = 0;
+	unsigned long pfn;
 
 	if (!(md->attribute & EFI_MEMORY_WB))
-		pf |= _PAGE_PCD;
+		flags |= _PAGE_PCD;
 
-	if (kernel_map_pages_in_pgd(pgd, md->phys_addr, va, md->num_pages, pf))
+	pfn = md->phys_addr >> PAGE_SHIFT;
+	if (kernel_map_pages_in_pgd(pgd, pfn, va, md->num_pages, flags))
 		pr_warn("Error mapping PA 0x%llx -> VA 0x%llx!\n",
 			md->phys_addr, va);
 }