powerpc/mm: don't use pte_alloc_kernel() until slab is available on PPC32
In the same way as PPC64, implement early allocation functions and avoid
calling pte_alloc_kernel() before slab is available.

Reviewed-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
parent 627f06c6f5
commit 4a6d8cf900
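The idea behind the change, condensed: until slab_is_available() returns true, page-table pages for kernel mappings are taken straight from memblock by dedicated early helpers, and map_kernel_page() chooses between the two paths at run time. The standalone userspace sketch below only illustrates that fallback pattern; every identifier in it (slab_up, early_pool, early_alloc, pt_alloc) is hypothetical and none of it is kernel code — the real helpers are early_alloc_pgtable() and early_pte_alloc_kernel() in the diff that follows.

/*
 * Standalone userspace analogy (hypothetical names, not kernel code):
 * serve allocations from a fixed early pool until the main allocator is
 * flagged available, mirroring the slab_is_available() check this patch
 * adds to map_kernel_page().
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static bool slab_up;                    /* stands in for slab_is_available() */

static unsigned char early_pool[4096];  /* stands in for memblock memory */
static size_t early_used;

/* Bump allocator used before "slab" exists, in the spirit of early_alloc_pgtable(). */
static void *early_alloc(size_t size)
{
	void *p;

	if (early_used + size > sizeof(early_pool)) {
		fprintf(stderr, "early pool exhausted\n"); /* the kernel helper panics instead */
		exit(EXIT_FAILURE);
	}
	p = early_pool + early_used;
	early_used += size;
	memset(p, 0, size);             /* page-table pages must start zeroed */
	return p;
}

/* Allocation entry point: chooses the path the way map_kernel_page() now does. */
static void *pt_alloc(size_t size)
{
	if (slab_up)
		return calloc(1, size); /* normal allocator once it is up */
	return early_alloc(size);       /* boot-time fallback */
}

int main(void)
{
	void *a = pt_alloc(64);         /* served from the early pool */
	void *b;

	slab_up = true;                 /* "slab" comes online */
	b = pt_alloc(64);               /* now served by the normal allocator */

	printf("early=%p normal=%p\n", a, b);
	free(b);                        /* only the calloc()'d buffer can be freed */
	return 0;
}

In the kernel itself the early path must also populate the PMD entry with the newly allocated PTE page, which is what early_pte_alloc_kernel() does via pmd_populate_kernel() in the diff below.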
@@ -43,11 +43,8 @@ EXPORT_SYMBOL(ioremap_bot);	/* aka VMALLOC_END */
 
 extern char etext[], _stext[], _sinittext[], _einittext[];
 
-__ref pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
+pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
 {
-	if (!slab_is_available())
-		return memblock_alloc(PTE_FRAG_SIZE, PTE_FRAG_SIZE);
-
 	return (pte_t *)pte_fragment_alloc(mm, 1);
 }
 
@@ -205,7 +202,29 @@ void iounmap(volatile void __iomem *addr)
 }
 EXPORT_SYMBOL(iounmap);
 
-int map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot)
+static void __init *early_alloc_pgtable(unsigned long size)
+{
+	void *ptr = memblock_alloc(size, size);
+
+	if (!ptr)
+		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
+		      __func__, size, size);
+
+	return ptr;
+}
+
+static pte_t __init *early_pte_alloc_kernel(pmd_t *pmdp, unsigned long va)
+{
+	if (pmd_none(*pmdp)) {
+		pte_t *ptep = early_alloc_pgtable(PTE_FRAG_SIZE);
+
+		pmd_populate_kernel(&init_mm, pmdp, ptep);
+	}
+	return pte_offset_kernel(pmdp, va);
+}
+
+
+int __ref map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot)
 {
 	pmd_t *pd;
 	pte_t *pg;
@@ -214,7 +233,10 @@ int map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot)
 	/* Use upper 10 bits of VA to index the first level map */
 	pd = pmd_offset(pud_offset(pgd_offset_k(va), va), va);
 	/* Use middle 10 bits of VA to index the second-level map */
-	pg = pte_alloc_kernel(pd, va);
+	if (likely(slab_is_available()))
+		pg = pte_alloc_kernel(pd, va);
+	else
+		pg = early_pte_alloc_kernel(pd, va);
 	if (pg != 0) {
 		err = 0;
 		/* The PTE should never be already set nor present in the