xtensa: use buddy allocator for PTE table

At the moment xtensa uses the slab allocator for PTE tables.  That
doesn't work with split page table locks enabled: slab uses
page->slab_cache and page->first_page for its pages, and these fields
share storage with page->ptl.

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Chris Zankel <chris@zankel.net>
Acked-by: Max Filippov <jcmvbkbc@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Authored by Kirill A. Shutemov on 2013-11-14 14:31:50 -08:00; committed by Linus Torvalds
parent 01058e7076
commit f820e2805c
3 changed files with 13 additions and 30 deletions
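
The field overlap described in the commit message can be pictured with a
short sketch.  This is illustrative only, not the verbatim
<linux/mm_types.h> definition of that era; the point is simply that the
split-PTE-lock spinlock sits in the same union as the fields slab
repurposes for its backing pages:

struct page_sketch {
        /* ... flags, mapping, refcount, etc. ... */
        union {
                unsigned long private;          /* generic per-page data */
                struct kmem_cache *slab_cache;  /* slab: owning cache */
                struct page *first_page;        /* compound/slab tail pages */
                spinlock_t ptl;                 /* split page table lock */
        };
};

A page backing a kmem_cache-allocated PTE table therefore cannot also
carry a per-page ptl, which is why the table is now taken straight from
the buddy allocator.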

arch/xtensa/include/asm/pgalloc.h

@@ -38,14 +38,18 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
 	free_page((unsigned long)pgd);
 }
 
-/* Use a slab cache for the pte pages (see also sparc64 implementation) */
-
-extern struct kmem_cache *pgtable_cache;
-
 static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
 					 unsigned long address)
 {
-	return kmem_cache_alloc(pgtable_cache, GFP_KERNEL|__GFP_REPEAT);
+	pte_t *ptep;
+	int i;
+
+	ptep = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
+	if (!ptep)
+		return NULL;
+	for (i = 0; i < 1024; i++)
+		pte_clear(NULL, 0, ptep + i);
+	return ptep;
 }
 
 static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
@@ -59,7 +63,7 @@ static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
 		return NULL;
 	page = virt_to_page(pte);
 	if (!pgtable_page_ctor(page)) {
-		kmem_cache_free(pgtable_cache, pte);
+		__free_page(page);
 		return NULL;
 	}
 	return page;
@@ -67,13 +71,13 @@ static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
 
 static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
 {
-	kmem_cache_free(pgtable_cache, pte);
+	free_page((unsigned long)pte);
 }
 
 static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
 {
 	pgtable_page_dtor(pte);
-	kmem_cache_free(pgtable_cache, page_address(pte));
+	__free_page(pte);
 }
 
 #define pmd_pgtable(pmd) pmd_page(pmd)
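
A small aside on the literal 1024 in the new pte_alloc_one_kernel(): a
PTE page on xtensa holds PAGE_SIZE / sizeof(pte_t) = 4096 / 4 = 1024
entries.  Written against the generic constant (assuming PTRS_PER_PTE
carries that value, as the xtensa headers define it), the clearing loop
would read:

	/* sketch of an equivalent form, not part of the commit */
	for (i = 0; i < PTRS_PER_PTE; i++)
		pte_clear(NULL, 0, ptep + i);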

arch/xtensa/include/asm/pgtable.h

@@ -220,12 +220,11 @@ extern unsigned long empty_zero_page[1024];
 #ifdef CONFIG_MMU
 extern pgd_t swapper_pg_dir[PAGE_SIZE/sizeof(pgd_t)];
 extern void paging_init(void);
-extern void pgtable_cache_init(void);
 #else
 # define swapper_pg_dir NULL
 static inline void paging_init(void) { }
-static inline void pgtable_cache_init(void) { }
 #endif
+static inline void pgtable_cache_init(void) { }
 
 /*
  * The pmd contains the kernel virtual address of the pte page.

arch/xtensa/mm/mmu.c

@@ -50,23 +50,3 @@ void __init init_mmu(void)
 	 */
 	set_ptevaddr_register(PGTABLE_START);
 }
-
-struct kmem_cache *pgtable_cache __read_mostly;
-
-static void pgd_ctor(void *addr)
-{
-	pte_t *ptep = (pte_t *)addr;
-	int i;
-
-	for (i = 0; i < 1024; i++, ptep++)
-		pte_clear(NULL, 0, ptep);
-
-}
-
-void __init pgtable_cache_init(void)
-{
-	pgtable_cache = kmem_cache_create("pgd",
-			PAGE_SIZE, PAGE_SIZE,
-			SLAB_HWCACHE_ALIGN,
-			pgd_ctor);
-}