
sparc64: Revert 16GB huge page support.

It overflows the amount of space available in the initial .text section
of trap handler assembler in some configurations, resulting in build
failures.

Signed-off-by: David S. Miller <davem@davemloft.net>
David S. Miller 2017-08-10 09:49:15 -07:00
parent 5389e239eb
commit 4d9fbf539b
9 changed files with 54 additions and 220 deletions

View File

@@ -4,13 +4,6 @@
#include <asm/page.h>
#include <asm-generic/hugetlb.h>
#ifdef CONFIG_HUGETLB_PAGE
struct pud_huge_patch_entry {
unsigned int addr;
unsigned int insn;
};
extern struct pud_huge_patch_entry __pud_huge_patch, __pud_huge_patch_end;
#endif
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t pte);

View File

@@ -17,7 +17,6 @@
#define HPAGE_SHIFT 23
#define REAL_HPAGE_SHIFT 22
#define HPAGE_16GB_SHIFT 34
#define HPAGE_2GB_SHIFT 31
#define HPAGE_256MB_SHIFT 28
#define HPAGE_64K_SHIFT 16
@@ -29,7 +28,7 @@
#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
#define REAL_HPAGE_PER_HPAGE (_AC(1,UL) << (HPAGE_SHIFT - REAL_HPAGE_SHIFT))
#define HUGE_MAX_HSTATE 5
#define HUGE_MAX_HSTATE 4
#endif
#ifndef __ASSEMBLY__
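
For orientation, each HPAGE_*_SHIFT above maps to a huge page size of 1UL << shift, and HUGE_MAX_HSTATE drops from 5 to 4 because the 16GB hstate goes away. A quick standalone C check of those sizes (illustrative only, not part of the patch):

#include <stdio.h>

int main(void)
{
	/* shift values as defined in the hunks above */
	printf("HPAGE_16GB_SHIFT  34 -> %3lu GB (removed by this commit)\n", (1UL << 34) >> 30);
	printf("HPAGE_2GB_SHIFT   31 -> %3lu GB\n", (1UL << 31) >> 30);
	printf("HPAGE_256MB_SHIFT 28 -> %3lu MB\n", (1UL << 28) >> 20);
	printf("HPAGE_SHIFT       23 -> %3lu MB (default huge page)\n", (1UL << 23) >> 20);
	printf("REAL_HPAGE_SHIFT  22 -> %3lu MB (hardware page backing an 8MB page)\n", (1UL << 22) >> 20);
	printf("HPAGE_64K_SHIFT   16 -> %3lu KB\n", (1UL << 16) >> 10);
	return 0;
}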

View File

@@ -414,11 +414,6 @@ static inline bool is_hugetlb_pmd(pmd_t pmd)
return !!(pmd_val(pmd) & _PAGE_PMD_HUGE);
}
static inline bool is_hugetlb_pud(pud_t pud)
{
return !!(pud_val(pud) & _PAGE_PUD_HUGE);
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
@@ -692,8 +687,6 @@ static inline unsigned long pmd_write(pmd_t pmd)
return pte_write(pte);
}
#define pud_write(pud) pte_write(__pte(pud_val(pud)))
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline unsigned long pmd_dirty(pmd_t pmd)
{
@@ -830,18 +823,9 @@ static inline unsigned long __pmd_page(pmd_t pmd)
return ((unsigned long) __va(pfn << PAGE_SHIFT));
}
static inline unsigned long pud_page_vaddr(pud_t pud)
{
pte_t pte = __pte(pud_val(pud));
unsigned long pfn;
pfn = pte_pfn(pte);
return ((unsigned long) __va(pfn << PAGE_SHIFT));
}
#define pmd_page(pmd) virt_to_page((void *)__pmd_page(pmd))
#define pud_page_vaddr(pud) \
((unsigned long) __va(pud_val(pud)))
#define pud_page(pud) virt_to_page((void *)pud_page_vaddr(pud))
#define pmd_clear(pmdp) (pmd_val(*(pmdp)) = 0UL)
#define pud_present(pud) (pud_val(pud) != 0U)

View File

@@ -195,41 +195,6 @@ extern struct tsb_phys_patch_entry __tsb_phys_patch, __tsb_phys_patch_end;
nop; \
699:
/* PUD has been loaded into REG1, interpret the value, seeing
* if it is a HUGE PUD or a normal one. If it is not valid
* then jump to FAIL_LABEL. If it is a HUGE PUD, and it
* translates to a valid PTE, branch to PTE_LABEL.
*
* We have to propagate bits [32:22] from the virtual address
* to resolve at 4M granularity.
*/
#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
#define USER_PGTABLE_CHECK_PUD_HUGE(VADDR, REG1, REG2, FAIL_LABEL, PTE_LABEL) \
700: ba 700f; \
nop; \
.section .pud_huge_patch, "ax"; \
.word 700b; \
nop; \
.previous; \
brz,pn REG1, FAIL_LABEL; \
sethi %uhi(_PAGE_PUD_HUGE), REG2; \
sllx REG2, 32, REG2; \
andcc REG1, REG2, %g0; \
be,pt %xcc, 700f; \
sethi %hi(0x1ffc0000), REG2; \
sllx REG2, 1, REG2; \
brgez,pn REG1, FAIL_LABEL; \
andn REG1, REG2, REG1; \
and VADDR, REG2, REG2; \
brlz,pt REG1, PTE_LABEL; \
or REG1, REG2, REG1; \
700:
#else
#define USER_PGTABLE_CHECK_PUD_HUGE(VADDR, REG1, REG2, FAIL_LABEL, PTE_LABEL) \
brz,pn REG1, FAIL_LABEL; \
nop;
#endif
/* PMD has been loaded into REG1, interpret the value, seeing
* if it is a HUGE PMD or a normal one. If it is not valid
* then jump to FAIL_LABEL. If it is a HUGE PMD, and it
@@ -277,7 +242,6 @@ extern struct tsb_phys_patch_entry __tsb_phys_patch, __tsb_phys_patch_end;
srlx REG2, 64 - PAGE_SHIFT, REG2; \
andn REG2, 0x7, REG2; \
ldxa [REG1 + REG2] ASI_PHYS_USE_EC, REG1; \
USER_PGTABLE_CHECK_PUD_HUGE(VADDR, REG1, REG2, FAIL_LABEL, 800f) \
brz,pn REG1, FAIL_LABEL; \
sllx VADDR, 64 - (PMD_SHIFT + PMD_BITS), REG2; \
srlx REG2, 64 - PAGE_SHIFT, REG2; \
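
The block comment above describes the removed PUD walk step: when the PUD entry is itself a 16GB mapping, virtual address bits [32:22] are folded into the TTE so it resolves at 4MB granularity. A rough C model of that three-way decision (illustrative only; the 0x1ffc00000UL mask is the one used by the removed update_mmu_cache() hunk near the end of this commit, not this macro's exact encoding, and the _PAGE_PUD_HUGE value below is a stand-in, not the kernel's definition):

#define _PAGE_PUD_HUGE	(1UL << 56)	/* stand-in for the kernel's TTE "huge PUD" bit */

enum pud_walk { PUD_FAIL, PUD_DESCEND, PUD_HUGE_PTE };

/* Mirrors the macro's branches: brz -> fail, huge bit clear -> normal walk,
 * brgez (TTE valid bit, bit 63, clear) -> fail, otherwise fold VA bits in.
 */
enum pud_walk check_pud_huge(long pud, unsigned long vaddr, long *tte)
{
	const unsigned long mask = 0x1ffc00000UL;	/* VA bits [32:22], 4MB granularity */

	if (pud == 0)
		return PUD_FAIL;			/* empty entry */
	if (!(pud & _PAGE_PUD_HUGE))
		return PUD_DESCEND;			/* normal PUD: keep walking to the PMD */
	if (pud >= 0)
		return PUD_FAIL;			/* huge but not valid */
	*tte = (pud & ~(long)mask) | (long)(vaddr & mask);
	return PUD_HUGE_PTE;				/* TTE ready for the TSB/TLB */
}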

View File

@@ -117,7 +117,7 @@ tsb_miss_page_table_walk_sun4v_fastpath:
/* Valid PTE is now in %g5. */
#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
sethi %uhi(_PAGE_PMD_HUGE | _PAGE_PUD_HUGE), %g7
sethi %uhi(_PAGE_PMD_HUGE), %g7
sllx %g7, 32, %g7
andcc %g5, %g7, %g0

View File

@@ -154,11 +154,6 @@ SECTIONS
*(.get_tick_patch)
__get_tick_patch_end = .;
}
.pud_huge_patch : {
__pud_huge_patch = .;
*(.pud_huge_patch)
__pud_huge_patch_end = .;
}
PERCPU_SECTION(SMP_CACHE_BYTES)
#ifdef CONFIG_JUMP_LABEL
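
The section removed above collects the patch sites declared by struct pud_huge_patch_entry in the first hunk of this commit; the removed pud_huge_patch() helper shown further down rewrites a single recorded instruction at boot once a 16GB page size is configured. A hypothetical generic form of that boot-time patching loop, for illustration only (apply_pud_huge_patches() is a made-up name; the kernel's own helper patched exactly one entry):

struct pud_huge_patch_entry {
	unsigned int	addr;	/* address of the instruction to rewrite */
	unsigned int	insn;	/* opcode to write there */
};

extern struct pud_huge_patch_entry __pud_huge_patch, __pud_huge_patch_end;

static void __init apply_pud_huge_patches(void)
{
	struct pud_huge_patch_entry *p;

	for (p = &__pud_huge_patch; p < &__pud_huge_patch_end; p++) {
		unsigned long addr = p->addr;

		*(unsigned int *)addr = p->insn;
		__asm__ __volatile__("flush %0" : : "r" (addr));	/* keep the I-cache coherent */
	}
}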

View File

@@ -103,45 +103,6 @@ static int gup_huge_pmd(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
return 1;
}
static int gup_huge_pud(pud_t *pudp, pud_t pud, unsigned long addr,
unsigned long end, int write, struct page **pages,
int *nr)
{
struct page *head, *page;
int refs;
if (!(pud_val(pud) & _PAGE_VALID))
return 0;
if (write && !pud_write(pud))
return 0;
refs = 0;
page = pud_page(pud) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
head = compound_head(page);
do {
VM_BUG_ON(compound_head(page) != head);
pages[*nr] = page;
(*nr)++;
page++;
refs++;
} while (addr += PAGE_SIZE, addr != end);
if (!page_cache_add_speculative(head, refs)) {
*nr -= refs;
return 0;
}
if (unlikely(pud_val(pud) != pud_val(*pudp))) {
*nr -= refs;
while (refs--)
put_page(head);
return 0;
}
return 1;
}
static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
int write, struct page **pages, int *nr)
{
@@ -180,11 +141,7 @@ static int gup_pud_range(pgd_t pgd, unsigned long addr, unsigned long end,
next = pud_addr_end(addr, end);
if (pud_none(pud))
return 0;
if (unlikely(pud_large(pud))) {
if (!gup_huge_pud(pudp, pud, addr, next,
write, pages, nr))
return 0;
} else if (!gup_pmd_range(pud, addr, next, write, pages, nr))
if (!gup_pmd_range(pud, addr, next, write, pages, nr))
return 0;
} while (pudp++, addr = next, addr != end);

View File

@@ -143,10 +143,6 @@ static pte_t sun4v_hugepage_shift_to_tte(pte_t entry, unsigned int shift)
pte_val(entry) = pte_val(entry) & ~_PAGE_SZALL_4V;
switch (shift) {
case HPAGE_16GB_SHIFT:
hugepage_size = _PAGE_SZ16GB_4V;
pte_val(entry) |= _PAGE_PUD_HUGE;
break;
case HPAGE_2GB_SHIFT:
hugepage_size = _PAGE_SZ2GB_4V;
pte_val(entry) |= _PAGE_PMD_HUGE;
@@ -191,9 +187,6 @@ static unsigned int sun4v_huge_tte_to_shift(pte_t entry)
unsigned int shift;
switch (tte_szbits) {
case _PAGE_SZ16GB_4V:
shift = HPAGE_16GB_SHIFT;
break;
case _PAGE_SZ2GB_4V:
shift = HPAGE_2GB_SHIFT;
break;
@@ -266,19 +259,22 @@ pte_t *huge_pte_alloc(struct mm_struct *mm,
pgd_t *pgd;
pud_t *pud;
pmd_t *pmd;
pte_t *pte = NULL;
pgd = pgd_offset(mm, addr);
pud = pud_alloc(mm, pgd, addr);
if (!pud)
return NULL;
if (sz >= PUD_SIZE)
return (pte_t *)pud;
pmd = pmd_alloc(mm, pud, addr);
if (!pmd)
return NULL;
if (sz >= PMD_SIZE)
return (pte_t *)pmd;
return pte_alloc_map(mm, pmd, addr);
if (pud) {
pmd = pmd_alloc(mm, pud, addr);
if (!pmd)
return NULL;
if (sz >= PMD_SIZE)
pte = (pte_t *)pmd;
else
pte = pte_alloc_map(mm, pmd, addr);
}
return pte;
}
pte_t *huge_pte_offset(struct mm_struct *mm,
@@ -287,40 +283,34 @@ pte_t *huge_pte_offset(struct mm_struct *mm,
pgd_t *pgd;
pud_t *pud;
pmd_t *pmd;
pte_t *pte = NULL;
pgd = pgd_offset(mm, addr);
if (pgd_none(*pgd))
return NULL;
pud = pud_offset(pgd, addr);
if (pud_none(*pud))
return NULL;
if (is_hugetlb_pud(*pud))
return (pte_t *)pud;
pmd = pmd_offset(pud, addr);
if (pmd_none(*pmd))
return NULL;
if (is_hugetlb_pmd(*pmd))
return (pte_t *)pmd;
return pte_offset_map(pmd, addr);
if (!pgd_none(*pgd)) {
pud = pud_offset(pgd, addr);
if (!pud_none(*pud)) {
pmd = pmd_offset(pud, addr);
if (!pmd_none(*pmd)) {
if (is_hugetlb_pmd(*pmd))
pte = (pte_t *)pmd;
else
pte = pte_offset_map(pmd, addr);
}
}
}
return pte;
}
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t entry)
{
unsigned int nptes, orig_shift, shift;
unsigned long i, size;
unsigned int i, nptes, orig_shift, shift;
unsigned long size;
pte_t orig;
size = huge_tte_to_size(entry);
shift = PAGE_SHIFT;
if (size >= PUD_SIZE)
shift = PUD_SHIFT;
else if (size >= PMD_SIZE)
shift = PMD_SHIFT;
else
shift = PAGE_SHIFT;
shift = size >= HPAGE_SIZE ? PMD_SHIFT : PAGE_SHIFT;
nptes = size >> shift;
if (!pte_present(*ptep) && pte_present(entry))
@@ -343,23 +333,19 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
pte_t *ptep)
{
unsigned int i, nptes, orig_shift, shift;
unsigned int i, nptes, hugepage_shift;
unsigned long size;
pte_t entry;
entry = *ptep;
size = huge_tte_to_size(entry);
shift = PAGE_SHIFT;
if (size >= PUD_SIZE)
shift = PUD_SHIFT;
else if (size >= PMD_SIZE)
shift = PMD_SHIFT;
if (size >= HPAGE_SIZE)
nptes = size >> PMD_SHIFT;
else
shift = PAGE_SHIFT;
nptes = size >> PAGE_SHIFT;
nptes = size >> shift;
orig_shift = pte_none(entry) ? PAGE_SHIFT : huge_tte_to_shift(entry);
hugepage_shift = pte_none(entry) ? PAGE_SHIFT :
huge_tte_to_shift(entry);
if (pte_present(entry))
mm->context.hugetlb_pte_count -= nptes;
@@ -368,11 +354,11 @@ pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
for (i = 0; i < nptes; i++)
ptep[i] = __pte(0UL);
maybe_tlb_batch_add(mm, addr, ptep, entry, 0, orig_shift);
maybe_tlb_batch_add(mm, addr, ptep, entry, 0, hugepage_shift);
/* An HPAGE_SIZE'ed page is composed of two REAL_HPAGE_SIZE'ed pages */
if (size == HPAGE_SIZE)
maybe_tlb_batch_add(mm, addr + REAL_HPAGE_SIZE, ptep, entry, 0,
orig_shift);
hugepage_shift);
return entry;
}
@@ -385,8 +371,7 @@ int pmd_huge(pmd_t pmd)
int pud_huge(pud_t pud)
{
return !pud_none(pud) &&
(pud_val(pud) & (_PAGE_VALID|_PAGE_PUD_HUGE)) != _PAGE_VALID;
return 0;
}
static void hugetlb_free_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
@@ -450,11 +435,8 @@ static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
next = pud_addr_end(addr, end);
if (pud_none_or_clear_bad(pud))
continue;
if (is_hugetlb_pud(*pud))
pud_clear(pud);
else
hugetlb_free_pmd_range(tlb, pud, addr, next, floor,
ceiling);
hugetlb_free_pmd_range(tlb, pud, addr, next, floor,
ceiling);
} while (pud++, addr = next, addr != end);
start &= PGDIR_MASK;
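
As a worked example of the nptes computation restored in set_huge_pte_at() and huge_ptep_get_and_clear() above (illustrative only, assuming the usual sparc64 values PAGE_SHIFT = 13 and PMD_SHIFT = 23, so HPAGE_SIZE is 8MB):

#include <stdio.h>

int main(void)
{
	const unsigned int PAGE_SHIFT = 13, PMD_SHIFT = 23;	/* assumed sparc64 values */
	const unsigned long HPAGE_SIZE = 1UL << 23;		/* 8MB */
	const unsigned long sizes[] = {
		1UL << 16,	/* 64KB  */
		1UL << 23,	/* 8MB   */
		1UL << 28,	/* 256MB */
		1UL << 31,	/* 2GB   */
	};	/* the four hstates that remain after this revert */

	for (int i = 0; i < 4; i++) {
		unsigned int shift = sizes[i] >= HPAGE_SIZE ? PMD_SHIFT : PAGE_SHIFT;
		printf("size %10lu -> %3lu entries written\n", sizes[i], sizes[i] >> shift);
	}
	/* 64KB -> 8 PTEs, 8MB -> 1 PMD, 256MB -> 32 PMDs, 2GB -> 256 PMDs.
	 * The 8MB case also queues a second TLB flush at addr + REAL_HPAGE_SIZE,
	 * since one HPAGE_SIZE page is backed by two 4MB hardware pages.
	 */
	return 0;
}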

View File

@@ -325,18 +325,6 @@ static void __update_mmu_tsb_insert(struct mm_struct *mm, unsigned long tsb_inde
}
#ifdef CONFIG_HUGETLB_PAGE
static void __init pud_huge_patch(void)
{
struct pud_huge_patch_entry *p;
unsigned long addr;
p = &__pud_huge_patch;
addr = p->addr;
*(unsigned int *)addr = p->insn;
__asm__ __volatile__("flush %0" : : "r" (addr));
}
static int __init setup_hugepagesz(char *string)
{
unsigned long long hugepage_size;
@@ -349,11 +337,6 @@ static int __init setup_hugepagesz(char *string)
hugepage_shift = ilog2(hugepage_size);
switch (hugepage_shift) {
case HPAGE_16GB_SHIFT:
hv_pgsz_mask = HV_PGSZ_MASK_16GB;
hv_pgsz_idx = HV_PGSZ_IDX_16GB;
pud_huge_patch();
break;
case HPAGE_2GB_SHIFT:
hv_pgsz_mask = HV_PGSZ_MASK_2GB;
hv_pgsz_idx = HV_PGSZ_IDX_2GB;
@@ -394,7 +377,6 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *
{
struct mm_struct *mm;
unsigned long flags;
bool is_huge_tsb;
pte_t pte = *ptep;
if (tlb_type != hypervisor) {
@@ -412,37 +394,15 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *
spin_lock_irqsave(&mm->context.lock, flags);
is_huge_tsb = false;
#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
if (mm->context.hugetlb_pte_count || mm->context.thp_pte_count) {
unsigned long hugepage_size = PAGE_SIZE;
if (is_vm_hugetlb_page(vma))
hugepage_size = huge_page_size(hstate_vma(vma));
if (hugepage_size >= PUD_SIZE) {
unsigned long mask = 0x1ffc00000UL;
/* Transfer bits [32:22] from address to resolve
* at 4M granularity.
*/
pte_val(pte) &= ~mask;
pte_val(pte) |= (address & mask);
} else if (hugepage_size >= PMD_SIZE) {
/* We are fabricating 8MB pages using 4MB
* real hw pages.
*/
pte_val(pte) |= (address & (1UL << REAL_HPAGE_SHIFT));
}
if (hugepage_size >= PMD_SIZE) {
__update_mmu_tsb_insert(mm, MM_TSB_HUGE,
REAL_HPAGE_SHIFT, address, pte_val(pte));
is_huge_tsb = true;
}
}
if ((mm->context.hugetlb_pte_count || mm->context.thp_pte_count) &&
is_hugetlb_pmd(__pmd(pte_val(pte)))) {
/* We are fabricating 8MB pages using 4MB real hw pages. */
pte_val(pte) |= (address & (1UL << REAL_HPAGE_SHIFT));
__update_mmu_tsb_insert(mm, MM_TSB_HUGE, REAL_HPAGE_SHIFT,
address, pte_val(pte));
} else
#endif
if (!is_huge_tsb)
__update_mmu_tsb_insert(mm, MM_TSB_BASE, PAGE_SHIFT,
address, pte_val(pte));
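
The restored branch above is the pre-16GB behaviour: sparc64 builds an 8MB huge page out of two 4MB hardware pages, so the TTE inserted into the huge TSB gets address bit 22 (REAL_HPAGE_SHIFT) copied into it to select which 4MB half is being mapped. A small standalone sketch of that bit transfer (illustrative only; the PTE value and addresses below are made up):

#include <stdio.h>

#define REAL_HPAGE_SHIFT 22	/* 4MB hardware page */

int main(void)
{
	unsigned long pte   = 0x80000000UL;		/* made-up 8MB-aligned PA plus flags */
	unsigned long base  = 0x100000000UL;		/* made-up 8MB-aligned virtual address */
	unsigned long lower = base + 0x123000;		/* faults in the first 4MB half */
	unsigned long upper = base + 0x400000 + 0x45000;	/* faults in the second 4MB half */

	printf("tte for lower half: %#lx\n", pte | (lower & (1UL << REAL_HPAGE_SHIFT)));
	printf("tte for upper half: %#lx\n", pte | (upper & (1UL << REAL_HPAGE_SHIFT)));
	/* The first fault leaves the PTE untouched; the second sets bit 22 so the
	 * hardware translation points at the second 4MB physical half.
	 */
	return 0;
}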