mirror of
https://mirrors.bfsu.edu.cn/git/linux.git
synced 2024-12-13 22:14:20 +08:00
a686425b31
The software large page emulation on s390 did not clear the pre-allocated page table in arch_release_hugepage() before freeing it. This could trigger the WARN_ON(!pte_none(*pte)) in mm/vmalloc.c:106 and make vmap_pte_range() fail, because the page table could be reused in page_table_alloc(). This is fixed now by calling clear_table() before page_table_free().

Signed-off-by: Gerald Schaefer <gerald.schaefer@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
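A minimal sketch of the change described above, as it ends up in arch_release_hugepage() in the file below (the clear_table() call is the new step; the constants and helpers are the s390 ones already used in this file):

	/* Clear the pre-allocated pte table before handing it back, so that
	 * page_table_alloc() can reuse it without tripping the
	 * WARN_ON(!pte_none(*pte)) in vmap_pte_range(). */
	clear_table((unsigned long *) ptep, _PAGE_TYPE_EMPTY,
		    PTRS_PER_PTE * sizeof(pte_t));
	page_table_free(&init_mm, (unsigned long *) ptep);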
133 lines
2.5 KiB
C
/*
 *  IBM System z Huge TLB Page Support for Kernel.
 *
 *    Copyright 2007 IBM Corp.
 *    Author(s): Gerald Schaefer <gerald.schaefer@de.ibm.com>
 */

#include <linux/mm.h>
#include <linux/hugetlb.h>


void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *pteptr, pte_t pteval)
{
	pmd_t *pmdp = (pmd_t *) pteptr;
	unsigned long mask;

	if (!MACHINE_HAS_HPAGE) {
		pteptr = (pte_t *) pte_page(pteval)[1].index;
		mask = pte_val(pteval) &
			(_SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO);
		pte_val(pteval) = (_SEGMENT_ENTRY + __pa(pteptr)) | mask;
	}

	pmd_val(*pmdp) = pte_val(pteval);
}

int arch_prepare_hugepage(struct page *page)
{
	unsigned long addr = page_to_phys(page);
	pte_t pte;
	pte_t *ptep;
	int i;

	if (MACHINE_HAS_HPAGE)
		return 0;

	ptep = (pte_t *) pte_alloc_one(&init_mm, addr);
	if (!ptep)
		return -ENOMEM;

	pte = mk_pte(page, PAGE_RW);
	for (i = 0; i < PTRS_PER_PTE; i++) {
		set_pte_at(&init_mm, addr + i * PAGE_SIZE, ptep + i, pte);
		pte_val(pte) += PAGE_SIZE;
	}
	page[1].index = (unsigned long) ptep;
	return 0;
}

void arch_release_hugepage(struct page *page)
{
	pte_t *ptep;

	if (MACHINE_HAS_HPAGE)
		return;

	ptep = (pte_t *) page[1].index;
	if (!ptep)
		return;
	clear_table((unsigned long *) ptep, _PAGE_TYPE_EMPTY,
		    PTRS_PER_PTE * sizeof(pte_t));
	page_table_free(&init_mm, (unsigned long *) ptep);
	page[1].index = 0;
}

pte_t *huge_pte_alloc(struct mm_struct *mm,
		      unsigned long addr, unsigned long sz)
{
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp = NULL;

	pgdp = pgd_offset(mm, addr);
	pudp = pud_alloc(mm, pgdp, addr);
	if (pudp)
		pmdp = pmd_alloc(mm, pudp, addr);
	return (pte_t *) pmdp;
}

pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp = NULL;

	pgdp = pgd_offset(mm, addr);
	if (pgd_present(*pgdp)) {
		pudp = pud_offset(pgdp, addr);
		if (pud_present(*pudp))
			pmdp = pmd_offset(pudp, addr);
	}
	return (pte_t *) pmdp;
}

int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
{
	return 0;
}

struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
			      int write)
{
	return ERR_PTR(-EINVAL);
}

int pmd_huge(pmd_t pmd)
{
	if (!MACHINE_HAS_HPAGE)
		return 0;

	return !!(pmd_val(pmd) & _SEGMENT_ENTRY_LARGE);
}

int pud_huge(pud_t pud)
{
	return 0;
}

struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
			     pmd_t *pmdp, int write)
{
	struct page *page;

	if (!MACHINE_HAS_HPAGE)
		return NULL;

	page = pmd_page(*pmdp);
	if (page)
		page += ((address & ~HPAGE_MASK) >> PAGE_SHIFT);
	return page;
}