Do not sync vmalloc/ioremap mappings on x86-64 kernels.

Hopefully now without the bugs!

Signed-off-by: Ingo Molnar <mingo@kernel.org>

-----BEGIN PGP SIGNATURE-----

iQJFBAABCgAvFiEEBpT5eoXrXCwVQwEKEnMQ0APhK1gFAl+Ej8oRHG1pbmdvQGtl
cm5lbC5vcmcACgkQEnMQ0APhK1jDeQ/9G1qMbMDef2XMIbQfHYbomDhzhZSZefZ4
hWuW5brGuQY7SoOT2VQVlifIRVMj61kfSAO1z68vXIFgyNWyx+gCXCun8V8548R+
E+/KFtPk4HlK1NAlSpz1wMlIe0OhW6fPLkAyty6w+iCAHORv6xPjZj6qhe38RzYS
W9aEgPmL/13KylPndJGafRjmXbEgBZQWBchDcSW6TDOE/bwAeN+E7gghbGfXZWdc
8+RJ0nasrfFKHZ4qB1rnF2KX/mNpK6gd6kNhLFadH0vFQ4Q/IQ34sk3T3muzV6N8
x7Z85WTp5ewBJDPsJnoNs6tPKaKr88fVZl8+J81bjMFDCXkg0dDZRlIAG3X2miVV
MuumqdBn34OvFTIFl8I4eydDCeIbZTKMAhveKx5I+dVxVX44ICJZNyVcfAtt96Fa
Zq2NK3c52CsGwZWSqEQ1brSA1OKyZ1ny+ed1RJwYEFpKK1o8sha92L1MYhIkVcM4
L/5vO1kH7e5fPbWxeQd4a5580JRzSD3/SHCqnd1GcY1xgJv9x73kcSbhY0xCMifi
8SdZjNk2gzfKyCXzAcfVveYR3bmvG4LJyGRfc5arsxqHHg2eZD08SkakHUXxwMJ1
C4vIdyGLwos5bWYPMNMPOy7HYS4Jm+MrzK3bZfz6Q6ezM7bGTPiU73HnXUJaLz1V
7DuNKeS1i1Q=
=tVAw
-----END PGP SIGNATURE-----

Merge tag 'x86-mm-2020-10-12' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 mm updates from Ingo Molnar:
 "Do not sync vmalloc/ioremap mappings on x86-64 kernels.

  Hopefully now without the bugs!"

* tag 'x86-mm-2020-10-12' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/mm/64: Update comment in preallocate_vmalloc_pages()
  x86/mm/64: Do not sync vmalloc/ioremap mappings
commit c1b4ec85ee
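For context, the hunks below remove x86-64's ARCH_PAGE_TABLE_SYNC_MASK definition and its arch_sync_kernel_mappings() implementation. The following sketch is a simplified illustration of how such a hook is consumed by the generic mm code, not a verbatim copy of it; the helper name sync_if_needed() is hypothetical, while pgtbl_mod_mask and the PGTBL_*_MODIFIED bits are the generic kernel symbols the removed macro referred to.

/*
 * Hypothetical helper, only to illustrate the pattern: while the
 * generic code populates kernel page tables for a new vmalloc or
 * ioremap mapping, it records which levels were modified in "mask".
 */
static void sync_if_needed(unsigned long start, unsigned long end,
			   pgtbl_mod_mask mask)
{
	/*
	 * With ARCH_PAGE_TABLE_SYNC_MASK left at the generic default
	 * of 0 on x86-64 (after this merge), this condition is never
	 * true: the top-level entries covering the vmalloc/ioremap
	 * range are preallocated at boot, so nothing at the PGD/P4D
	 * level changes later and no per-PGD synchronization is needed.
	 */
	if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
		arch_sync_kernel_mappings(start, end);
}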
arch/x86/include/asm/pgtable_64_types.h

@@ -159,6 +159,4 @@ extern unsigned int ptrs_per_p4d;
 
 #define PGD_KERNEL_START	((PAGE_SIZE / 2) / sizeof(pgd_t))
 
-#define ARCH_PAGE_TABLE_SYNC_MASK	(pgtable_l5_enabled() ? PGTBL_PGD_MODIFIED : PGTBL_P4D_MODIFIED)
-
 #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
arch/x86/mm/init_64.c

@@ -217,11 +217,6 @@ static void sync_global_pgds(unsigned long start, unsigned long end)
 		sync_global_pgds_l4(start, end);
 }
 
-void arch_sync_kernel_mappings(unsigned long start, unsigned long end)
-{
-	sync_global_pgds(start, end);
-}
-
 /*
  * NOTE: This function is marked __ref because it calls __init function
  * (alloc_bootmem_pages). It's safe to do it ONLY when after_bootmem == 0.
@@ -1257,14 +1252,19 @@ static void __init preallocate_vmalloc_pages(void)
 		if (!p4d)
 			goto failed;
 
-		/*
-		 * With 5-level paging the P4D level is not folded. So the PGDs
-		 * are now populated and there is no need to walk down to the
-		 * PUD level.
-		 */
 		if (pgtable_l5_enabled())
 			continue;
 
+		/*
+		 * The goal here is to allocate all possibly required
+		 * hardware page tables pointed to by the top hardware
+		 * level.
+		 *
+		 * On 4-level systems, the P4D layer is folded away and
+		 * the above code does no preallocation. Below, go down
+		 * to the pud _software_ level to ensure the second
+		 * hardware level is allocated on 4-level systems too.
+		 */
 		lvl = "pud";
 		pud = pud_alloc(&init_mm, p4d, addr);
 		if (!pud)
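The new comment in preallocate_vmalloc_pages() above hinges on P4D folding. As a hedged illustration, the helper below is hypothetical (not part of this commit) and merely restates that logic with the same kernel primitives: on 5-level systems p4d_alloc() installs a real P4D page under the PGD entry, while on 4-level systems the P4D is folded into the PGD and pud_alloc() is what actually provides the second hardware level.

/*
 * Hypothetical restatement of the preallocation step shown above.
 * With 5-level paging, p4d_alloc() allocates a real P4D page and
 * that is the whole job.  With 4-level paging the P4D is folded,
 * p4d_alloc() allocates no memory, and the second hardware level
 * is only guaranteed once pud_alloc() has run.
 */
static p4d_t * __init prealloc_second_level(pgd_t *pgd, unsigned long addr)
{
	p4d_t *p4d = p4d_alloc(&init_mm, pgd, addr);

	if (!p4d)
		return NULL;

	if (!pgtable_l5_enabled() && !pud_alloc(&init_mm, p4d, addr))
		return NULL;	/* 4-level: explicitly allocate the PUD */

	return p4d;
}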