mirror of https://github.com/edk2-porting/linux-next.git
621b195515
This change adds support for a new "super" bit in the PTE, using the new arch_make_huge_pte() method. The Tilera hypervisor sees the bit set at a given level of the page table and gangs together 4, 16, or 64 consecutive pages from that level of the hierarchy to create a larger TLB entry.

One extra "super" page size can be specified at each of the three levels of the page table hierarchy on tilegx, using the "hugepagesz" argument on the boot command line. A new hypervisor API is added to allow Linux to tell the hypervisor how many PTEs to gang together at each level of the page table.

To allow pre-allocating huge pages larger than the buddy allocator can handle, this change modifies the Tilera bootmem support to put all of memory on tilegx platforms into bootmem.

As part of this change I eliminate the vestigial CONFIG_HIGHPTE support, which never worked anyway, and eliminate the hv_page_size() API in favor of the standard vma_kernel_pagesize() API.

Signed-off-by: Chris Metcalf <cmetcalf@tilera.com>
124 lines | 4.0 KiB | C
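To make the commit's mechanism concrete, here is a rough sketch (not part of the file below, and not necessarily the exact code in this commit) of how an arch_make_huge_pte() hook can mark a PTE as "super" when the requested huge page size is not one of the native page-table levels; pte_mksuper() is assumed here to be the arch helper that sets the new bit.

#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <asm/pgtable.h>

/*
 * Sketch only: if the huge page size requested for this VMA is not a
 * native PMD- or PUD-sized mapping, set the assumed "super" bit so the
 * hypervisor gangs 4, 16, or 64 consecutive PTEs at that level of the
 * page table into one larger TLB entry.
 */
pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
			 struct page *page, int writable)
{
	size_t pagesize = huge_page_size(hstate_vma(vma));

	if (pagesize != PUD_SIZE && pagesize != PMD_SIZE)
		entry = pte_mksuper(entry);	/* assumed helper name */
	return entry;
}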
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 */

#ifndef _ASM_TILE_TLBFLUSH_H
#define _ASM_TILE_TLBFLUSH_H

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <asm/cacheflush.h>
#include <asm/page.h>
#include <hv/hypervisor.h>

/*
 * Rather than associating each mm with its own ASID, we just use
 * ASIDs to allow us to lazily flush the TLB when we switch mms.
 * This way we only have to do an actual TLB flush on mm switch
 * every time we wrap ASIDs, not every single time we switch.
 *
 * FIXME: We might improve performance by keeping ASIDs around
 * properly, though since the hypervisor direct-maps VAs to TSB
 * entries, we're likely to have lost at least the executable page
 * mappings by the time we switch back to the original mm.
 */
DECLARE_PER_CPU(int, current_asid);

/* The hypervisor tells us what ASIDs are available to us. */
extern int min_asid, max_asid;

/* Pass as vma pointer for non-executable mapping, if no vma available. */
#define FLUSH_NONEXEC ((struct vm_area_struct *)-1UL)

/* Flush a single user page on this cpu. */
static inline void local_flush_tlb_page(struct vm_area_struct *vma,
					unsigned long addr,
					unsigned long page_size)
{
	int rc = hv_flush_page(addr, page_size);
	if (rc < 0)
		panic("hv_flush_page(%#lx,%#lx) failed: %d",
		      addr, page_size, rc);
	if (!vma || (vma != FLUSH_NONEXEC && (vma->vm_flags & VM_EXEC)))
		__flush_icache();
}

/* Flush range of user pages on this cpu. */
static inline void local_flush_tlb_pages(struct vm_area_struct *vma,
					 unsigned long addr,
					 unsigned long page_size,
					 unsigned long len)
{
	int rc = hv_flush_pages(addr, page_size, len);
	if (rc < 0)
		panic("hv_flush_pages(%#lx,%#lx,%#lx) failed: %d",
		      addr, page_size, len, rc);
	if (!vma || (vma != FLUSH_NONEXEC && (vma->vm_flags & VM_EXEC)))
		__flush_icache();
}

/* Flush all user pages on this cpu. */
static inline void local_flush_tlb(void)
{
	int rc = hv_flush_all(1);   /* preserve global mappings */
	if (rc < 0)
		panic("hv_flush_all(1) failed: %d", rc);
	__flush_icache();
}

/*
 * Global pages have to be flushed a bit differently.  Not a real
 * performance problem because this does not happen often.
 */
static inline void local_flush_tlb_all(void)
{
	int i;
	for (i = 0; ; ++i) {
		HV_VirtAddrRange r = hv_inquire_virtual(i);
		if (r.size == 0)
			break;
		local_flush_tlb_pages(NULL, r.start, PAGE_SIZE, r.size);
		local_flush_tlb_pages(NULL, r.start, HPAGE_SIZE, r.size);
	}
}

/*
 * TLB flushing:
 *
 *  - flush_tlb() flushes the current mm struct TLBs
 *  - flush_tlb_all() flushes all processes TLBs
 *  - flush_tlb_mm(mm) flushes the specified mm context TLB's
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
 *  - flush_tlb_others(cpumask, mm, va) flushes TLBs on other cpus
 *
 * Here (as in vm_area_struct), "end" means the first byte after
 * our end address.
 */

extern void flush_tlb_all(void);
extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
extern void flush_tlb_current_task(void);
extern void flush_tlb_mm(struct mm_struct *);
extern void flush_tlb_page(struct vm_area_struct *, unsigned long);
extern void flush_tlb_page_mm(struct vm_area_struct *,
			      struct mm_struct *, unsigned long);
extern void flush_tlb_range(struct vm_area_struct *,
			    unsigned long start, unsigned long end);

#define flush_tlb() flush_tlb_current_task()

#endif /* _ASM_TILE_TLBFLUSH_H */
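For illustration only, and not part of the header itself: a small sketch of how a caller might use the local flush helpers declared above. The function names example_flush_one_page() and example_flush_nonexec() are made up for this sketch; the point is that passing FLUSH_NONEXEC (or a VMA without VM_EXEC) lets local_flush_tlb_page() skip the I-cache flush.

#include <asm/tlbflush.h>

/* Sketch: drop a stale user translation on the local cpu only.
 * If the VMA is executable, the helper also flushes the I-cache. */
static void example_flush_one_page(struct vm_area_struct *vma,
				   unsigned long addr)
{
	local_flush_tlb_page(vma, addr, PAGE_SIZE);
}

/* Sketch: no vma handy, but the mapping is known to be non-executable,
 * so FLUSH_NONEXEC avoids an unnecessary I-cache flush. */
static void example_flush_nonexec(unsigned long addr)
{
	local_flush_tlb_page(FLUSH_NONEXEC, addr, PAGE_SIZE);
}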