powerpc fixes for 6.6 #6
Merge tag 'powerpc-6.6-6' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux

Pull powerpc fixes from Michael Ellerman:

 - Fix boot crash with FLATMEM since set_ptes() introduction

 - Avoid calling arch_enter/leave_lazy_mmu() in set_ptes()

Thanks to Aneesh Kumar K.V and Erhard Furtner.

* tag 'powerpc-6.6-6' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux:
  powerpc/mm: Avoid calling arch_enter/leave_lazy_mmu() in set_ptes
  powerpc/mm: Fix boot crash with FLATMEM
commit 09a4a03c07
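Background for the FLATMEM fix below: with CONFIG_FLATMEM the generic pfn_valid() is bounded by the global max_mapnr, which until this change was only initialised in mem_init(). Any pfn lookup performed earlier in boot therefore sees every pfn as out of range. The helper is roughly the following (a paraphrase of the FLATMEM variant in include/linux/mmzone.h, illustrative only, not a verbatim copy):

/*
 * Paraphrase of the generic FLATMEM pfn_valid() (see include/linux/mmzone.h);
 * illustrative only, details vary by kernel version.
 */
static inline int pfn_valid(unsigned long pfn)
{
	extern unsigned long max_mapnr;
	unsigned long pfn_offset = ARCH_PFN_OFFSET;

	/* Until set_max_mapnr() has run, max_mapnr is 0 and no pfn is valid. */
	return pfn >= pfn_offset && (pfn - pfn_offset) < max_mapnr;
}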
@@ -948,6 +948,8 @@ void __init setup_arch(char **cmdline_p)
 
 	/* Parse memory topology */
 	mem_topology_setup();
+	/* Set max_mapnr before paging_init() */
+	set_max_mapnr(max_pfn);
 
 	/*
 	 * Release secondary cpus out of their spinloops at 0x60 now that
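The set_max_mapnr() call added above only does real work on FLATMEM builds, where it is a trivial setter of the global bound consulted by pfn_valid(); roughly (a paraphrase of the include/linux/mm.h helper, illustrative only):

/* Paraphrase of the include/linux/mm.h helper; illustrative only. */
#ifdef CONFIG_FLATMEM
extern unsigned long max_mapnr;

static inline void set_max_mapnr(unsigned long limit)
{
	max_mapnr = limit;	/* upper bound later consulted by pfn_valid() */
}
#else
static inline void set_max_mapnr(unsigned long limit) { }
#endif

Calling it right after mem_topology_setup(), once max_pfn is known and before paging_init() builds the first page tables, is what makes removing the later call from mem_init() in the next hunk safe.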
@@ -288,7 +288,6 @@ void __init mem_init(void)
 #endif
 
 	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
-	set_max_mapnr(max_pfn);
 
 	kasan_late_init();
 
@@ -104,6 +104,8 @@ static pte_t set_pte_filter_hash(pte_t pte) { return pte; }
 /* Embedded type MMU with HW exec support. This is a bit more complicated
  * as we don't have two bits to spare for _PAGE_EXEC and _PAGE_HWEXEC so
  * instead we "filter out" the exec permission for non clean pages.
+ *
+ * This is also called once for the folio. So only work with folio->flags here.
  */
 static inline pte_t set_pte_filter(pte_t pte)
 {
@@ -190,29 +192,39 @@ static pte_t set_access_flags_filter(pte_t pte, struct vm_area_struct *vma,
 void set_ptes(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
 		pte_t pte, unsigned int nr)
 {
-	/*
-	 * Make sure hardware valid bit is not set. We don't do
-	 * tlb flush for this update.
-	 */
-	VM_WARN_ON(pte_hw_valid(*ptep) && !pte_protnone(*ptep));
 
 	/* Note: mm->context.id might not yet have been assigned as
 	 * this context might not have been activated yet when this
-	 * is called.
+	 * is called. Filter the pte value and use the filtered value
+	 * to setup all the ptes in the range.
 	 */
 	pte = set_pte_filter(pte);
 
-	/* Perform the setting of the PTE */
-	arch_enter_lazy_mmu_mode();
+	/*
+	 * We don't need to call arch_enter/leave_lazy_mmu_mode()
+	 * because we expect set_ptes to be only be used on not present
+	 * and not hw_valid ptes. Hence there is no translation cache flush
+	 * involved that need to be batched.
+	 */
 	for (;;) {
+
+		/*
+		 * Make sure hardware valid bit is not set. We don't do
+		 * tlb flush for this update.
+		 */
+		VM_WARN_ON(pte_hw_valid(*ptep) && !pte_protnone(*ptep));
+
+		/* Perform the setting of the PTE */
 		__set_pte_at(mm, addr, ptep, pte, 0);
 		if (--nr == 0)
 			break;
 		ptep++;
-		pte = __pte(pte_val(pte) + (1UL << PTE_RPN_SHIFT));
 		addr += PAGE_SIZE;
+		/*
+		 * increment the pfn.
+		 */
+		pte = pfn_pte(pte_pfn(pte) + 1, pte_pgprot((pte)));
 	}
-	arch_leave_lazy_mmu_mode();
 }
 
 void unmap_kernel_page(unsigned long va)
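For illustration only, here is a small standalone user-space sketch (not kernel code; the pte layout, PTE_RPN_SHIFT and all values are assumptions invented for the example) that mimics the pfn-stepping arithmetic of the new loop: each iteration rebuilds the pte with pfn_pte(pte_pfn(pte) + 1, prot), so the permission bits are preserved while the target page advances by one.

/*
 * Standalone sketch, NOT kernel code: mimics the pfn-stepping loop in the
 * new powerpc set_ptes().  PTE layout, PTE_RPN_SHIFT and all values are
 * assumptions made up for this example.
 */
#include <stdio.h>

#define PTE_RPN_SHIFT  12                      /* assumed pfn position in the pte */
#define PTE_FLAGS_MASK ((1UL << PTE_RPN_SHIFT) - 1)

static unsigned long pte_pfn(unsigned long pte) { return pte >> PTE_RPN_SHIFT; }
static unsigned long pfn_pte(unsigned long pfn, unsigned long prot)
{
	return (pfn << PTE_RPN_SHIFT) | (prot & PTE_FLAGS_MASK);
}

int main(void)
{
	unsigned long prot = 0x1a5;                 /* arbitrary permission bits */
	unsigned long pte  = pfn_pte(0x1000, prot); /* pte for the first page */
	unsigned int nr = 4;                        /* pages to "map" in one call */

	for (;;) {
		printf("map pfn 0x%lx flags 0x%lx\n", pte_pfn(pte), pte & PTE_FLAGS_MASK);
		if (--nr == 0)
			break;
		/* Same permissions, next pfn - what the kernel loop does per page. */
		pte = pfn_pte(pte_pfn(pte) + 1, prot);
	}
	return 0;
}

Built with any C99 compiler, it prints four consecutive pfns with identical flag bits, which is the batching behaviour set_ptes() provides to callers mapping a range of pages from one folio.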