asm-generic/tlb: Track which levels of the page tables have been cleared
It is common for architectures with hugepage support to require only a single TLB invalidation operation per hugepage during unmap(), rather than iterating through the mapping at a PAGE_SIZE increment. Currently, however, the level in the page table where the unmap() operation occurs is not stored in the mmu_gather structure, therefore forcing architectures to issue additional TLB invalidation operations or to give up and over-invalidate by e.g. invalidating the entire TLB.

Ideally, we could add an interval rbtree to the mmu_gather structure, which would allow us to associate the correct mapping granule with the various sub-mappings within the range being invalidated. However, this is costly in terms of book-keeping and memory management, so instead we approximate by keeping track of the page table levels that are cleared and provide a means to query the smallest granule required for invalidation.

Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>

commit a6d60245d6 (parent 22a61c3c4f)
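To make the cost concrete: a 2MiB hugepage covers 512 4KiB pages (2MiB / 4KiB = 512), so without granule information an architecture must either issue 512 page-granule invalidations for what was a single PMD-level mapping, or give up and flush the entire TLB.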
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -116,6 +116,14 @@ struct mmu_gather {
 	 */
 	unsigned int		freed_tables : 1;
 
+	/*
+	 * at which levels have we cleared entries?
+	 */
+	unsigned int		cleared_ptes : 1;
+	unsigned int		cleared_pmds : 1;
+	unsigned int		cleared_puds : 1;
+	unsigned int		cleared_p4ds : 1;
+
 	struct mmu_gather_batch	*active;
 	struct mmu_gather_batch	local;
 	struct page		*__pages[MMU_GATHER_BUNDLE];
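Note that each cleared_* field is a single bit: the structure records which levels were touched somewhere in the gathered range, not where. The tracking is therefore a conservative summary, which is exactly the approximation the commit message trades against an interval rbtree. An illustrative consequence (not from the patch):

	/*
	 * Zapping a VMA that mixes 4K pages and one 2M hugepage sets
	 *
	 *	tlb->cleared_ptes == 1 && tlb->cleared_pmds == 1
	 *
	 * so the granule query below must fall back to PAGE_SHIFT for
	 * the whole range -- coarse, but far cheaper than an rbtree.
	 */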
@@ -150,6 +158,10 @@ static inline void __tlb_reset_range(struct mmu_gather *tlb)
 		tlb->end = 0;
 	}
 	tlb->freed_tables = 0;
+	tlb->cleared_ptes = 0;
+	tlb->cleared_pmds = 0;
+	tlb->cleared_puds = 0;
+	tlb->cleared_p4ds = 0;
 }
 
 static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
@@ -199,6 +211,25 @@ static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
 }
 #endif
 
+static inline unsigned long tlb_get_unmap_shift(struct mmu_gather *tlb)
+{
+	if (tlb->cleared_ptes)
+		return PAGE_SHIFT;
+	if (tlb->cleared_pmds)
+		return PMD_SHIFT;
+	if (tlb->cleared_puds)
+		return PUD_SHIFT;
+	if (tlb->cleared_p4ds)
+		return P4D_SHIFT;
+
+	return PAGE_SHIFT;
+}
+
+static inline unsigned long tlb_get_unmap_size(struct mmu_gather *tlb)
+{
+	return 1UL << tlb_get_unmap_shift(tlb);
+}
+
 /*
  * In the case of tlb vma handling, we can optimise these away in the
  * case where we're doing a full MM flush. When we're doing a munmap,
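As a rough sketch of how an architecture back-end might consume the new helpers (not part of this patch; flush_one_entry() is a hypothetical stand-in for the architecture's per-entry invalidation primitive, and the sketch assumes tlb->start is aligned to the chosen stride):

	static inline void tlb_flush(struct mmu_gather *tlb)
	{
		unsigned long stride = tlb_get_unmap_size(tlb);
		unsigned long addr;

		/* One invalidation per mapping granule, not per PAGE_SIZE. */
		for (addr = tlb->start; addr < tlb->end; addr += stride)
			flush_one_entry(addr);	/* hypothetical arch hook */
	}

A hugepage-only range then takes one operation per hugepage, while any range that touched the PTE level degrades to the old per-page behaviour.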
@@ -232,13 +263,19 @@ static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
 #define tlb_remove_tlb_entry(tlb, ptep, address)		\
 	do {							\
 		__tlb_adjust_range(tlb, address, PAGE_SIZE);	\
+		tlb->cleared_ptes = 1;				\
 		__tlb_remove_tlb_entry(tlb, ptep, address);	\
 	} while (0)
 
-#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address)	     \
-	do {							     \
-		__tlb_adjust_range(tlb, address, huge_page_size(h)); \
-		__tlb_remove_tlb_entry(tlb, ptep, address);	     \
+#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address)	\
+	do {							\
+		unsigned long _sz = huge_page_size(h);		\
+		__tlb_adjust_range(tlb, address, _sz);		\
+		if (_sz == PMD_SIZE)				\
+			tlb->cleared_pmds = 1;			\
+		else if (_sz == PUD_SIZE)			\
+			tlb->cleared_puds = 1;			\
+		__tlb_remove_tlb_entry(tlb, ptep, address);	\
 	} while (0)
 
 /**
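The huge-entry macro infers the level by comparing huge_page_size() against PMD_SIZE and PUD_SIZE: on x86-64, for example, a 2MiB hstate sets cleared_pmds and a 1GiB hstate sets cleared_puds. Hugetlb sizes matching neither (e.g. arm64 contiguous-hint sizes) set no bit, and tlb_get_unmap_shift() then falls through to its conservative PAGE_SHIFT default.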
@@ -252,6 +289,7 @@ static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
 #define tlb_remove_pmd_tlb_entry(tlb, pmdp, address)			\
 	do {								\
 		__tlb_adjust_range(tlb, address, HPAGE_PMD_SIZE);	\
+		tlb->cleared_pmds = 1;					\
 		__tlb_remove_pmd_tlb_entry(tlb, pmdp, address);		\
 	} while (0)
 
@@ -266,6 +304,7 @@ static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
 #define tlb_remove_pud_tlb_entry(tlb, pudp, address)			\
 	do {								\
 		__tlb_adjust_range(tlb, address, HPAGE_PUD_SIZE);	\
+		tlb->cleared_puds = 1;					\
 		__tlb_remove_pud_tlb_entry(tlb, pudp, address);		\
 	} while (0)
 
@@ -291,7 +330,8 @@ static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
 #define pte_free_tlb(tlb, ptep, address)			\
 	do {							\
 		__tlb_adjust_range(tlb, address, PAGE_SIZE);	\
-		tlb->freed_tables = 1;				\
+		tlb->freed_tables = 1;				\
+		tlb->cleared_pmds = 1;				\
 		__pte_free_tlb(tlb, ptep, address);		\
 	} while (0)
 #endif
@@ -300,7 +340,8 @@ static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
 #define pmd_free_tlb(tlb, pmdp, address)			\
 	do {							\
 		__tlb_adjust_range(tlb, address, PAGE_SIZE);	\
-		tlb->freed_tables = 1;				\
+		tlb->freed_tables = 1;				\
+		tlb->cleared_puds = 1;				\
 		__pmd_free_tlb(tlb, pmdp, address);		\
 	} while (0)
 #endif
@@ -310,7 +351,8 @@ static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
 #define pud_free_tlb(tlb, pudp, address)			\
 	do {							\
 		__tlb_adjust_range(tlb, address, PAGE_SIZE);	\
-		tlb->freed_tables = 1;				\
+		tlb->freed_tables = 1;				\
+		tlb->cleared_p4ds = 1;				\
 		__pud_free_tlb(tlb, pudp, address);		\
 	} while (0)
 #endif
@@ -321,7 +363,7 @@ static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
 #define p4d_free_tlb(tlb, pudp, address)			\
 	do {							\
 		__tlb_adjust_range(tlb, address, PAGE_SIZE);	\
-		tlb->freed_tables = 1;				\
+		tlb->freed_tables = 1;				\
 		__p4d_free_tlb(tlb, pudp, address);		\
 	} while (0)
 #endif
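The four table-freeing hunks share one piece of reasoning: freeing a page table at one level clears an entry in the level above it, namely the entry that pointed to the freed table. Hence pte_free_tlb() is paired with cleared_pmds, pmd_free_tlb() with cleared_puds and pud_free_tlb() with cleared_p4ds, while p4d_free_tlb() sets only freed_tables because the structure tracks no cleared bit for the PGD level.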
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -267,8 +267,10 @@ void arch_tlb_finish_mmu(struct mmu_gather *tlb,
 {
 	struct mmu_gather_batch *batch, *next;
 
-	if (force)
+	if (force) {
+		__tlb_reset_range(tlb);
 		__tlb_adjust_range(tlb, start, end - start);
+	}
 
 	tlb_flush_mmu(tlb);
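Finally, the mm/memory.c hunk keeps the force path consistent with the new state: __tlb_reset_range() now also zeroes the cleared_* bits, so calling it before __tlb_adjust_range() ensures a forced flush of the full [start, end) range does not inherit stale level information from whatever was gathered earlier; with every bit clear, tlb_get_unmap_shift() returns PAGE_SHIFT and the forced flush stays page-granular and conservative.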