s390/mm: introduce ptep_flush_lazy helper
Isolate the logic of IDTE vs. IPTE flushing of ptes in two functions, ptep_flush_lazy and __tlb_flush_mm_lazy.

Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
parent b6bed093f4
commit 5c474a1e22
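The idea is easiest to see in isolation: a pte flush can be skipped, and merely recorded in mm->context.flush_mm, as long as no CPU other than the current one has the address space attached; the deferred work is then carried out in one go by __tlb_flush_mm_lazy. The following is a minimal user-space sketch of that decision rule. The struct layout, the printf stand-ins for the IPTE instruction and the full-mm flush, and the global current_active_mm are simplified assumptions for illustration, not the kernel API:

#include <stdio.h>

/* Simplified stand-ins for the kernel structures (assumption). */
struct mm_context {
        int attach_count;       /* number of CPUs with this mm attached */
        int flush_mm;           /* a full-mm TLB flush is still pending */
};

struct mm_struct {
        struct mm_context context;
};

static struct mm_struct *current_active_mm;     /* mock of current->active_mm */

/* Mock of the IPTE instruction: invalidate one pte immediately. */
static void __ptep_ipte(unsigned long address)
{
        printf("IPTE: flush pte at %#lx now\n", address);
}

/*
 * The rule introduced by this commit: flush right away if any other
 * CPU has the mm attached, otherwise just mark the flush as pending.
 */
static void ptep_flush_lazy(struct mm_struct *mm, unsigned long address)
{
        int active = (mm == current_active_mm) ? 1 : 0;

        if (mm->context.attach_count > active)
                __ptep_ipte(address);
        else
                mm->context.flush_mm = 1;
}

/* Perform the deferred flush, if one was recorded. */
static void __tlb_flush_mm_lazy(struct mm_struct *mm)
{
        if (mm->context.flush_mm) {
                printf("full mm TLB flush\n");
                mm->context.flush_mm = 0;
        }
}

int main(void)
{
        struct mm_struct mm = { .context = { .attach_count = 1, .flush_mm = 0 } };

        current_active_mm = &mm;
        ptep_flush_lazy(&mm, 0x1000);   /* mm is exclusive: deferred */
        mm.context.attach_count = 2;    /* a second CPU attaches */
        ptep_flush_lazy(&mm, 0x2000);   /* attached elsewhere: flushed now */
        __tlb_flush_mm_lazy(&mm);       /* catches up on the deferred flush */
        return 0;
}

Note that, as in the kernel code below, the test compares attach_count against active rather than against 1, so a flush on behalf of a remote mm (mm != current->active_mm) is only deferred when no CPU at all has it attached.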
arch/s390/include/asm/mmu_context.h
@@ -77,8 +77,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
        WARN_ON(atomic_read(&prev->context.attach_count) < 0);
        atomic_inc(&next->context.attach_count);
        /* Check for TLBs not flushed yet */
-       if (next->context.flush_mm)
-               __tlb_flush_mm(next);
+       __tlb_flush_mm_lazy(next);
 }
 
 #define enter_lazy_tlb(mm,tsk) do { } while (0)
arch/s390/include/asm/pgtable.h
@@ -414,12 +414,6 @@ extern unsigned long MODULES_END;
 #define SEGMENT_READ   __pgprot(_SEGMENT_ENTRY_PROTECT)
 #define SEGMENT_WRITE  __pgprot(0)
 
-static inline int mm_exclusive(struct mm_struct *mm)
-{
-       return likely(mm == current->active_mm &&
-                     atomic_read(&mm->context.attach_count) <= 1);
-}
-
 static inline int mm_has_pgste(struct mm_struct *mm)
 {
 #ifdef CONFIG_PGSTE
@@ -1037,6 +1031,17 @@ static inline void __ptep_ipte(unsigned long address, pte_t *ptep)
        }
 }
 
+static inline void ptep_flush_lazy(struct mm_struct *mm,
+                                  unsigned long address, pte_t *ptep)
+{
+       int active = (mm == current->active_mm) ? 1 : 0;
+
+       if (atomic_read(&mm->context.attach_count) > active)
+               __ptep_ipte(address, ptep);
+       else
+               mm->context.flush_mm = 1;
+}
+
 /*
  * This is hard to understand. ptep_get_and_clear and ptep_clear_flush
  * both clear the TLB for the unmapped pte. The reason is that
@@ -1057,15 +1062,13 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
        pgste_t pgste;
        pte_t pte;
 
-       mm->context.flush_mm = 1;
        if (mm_has_pgste(mm)) {
                pgste = pgste_get_lock(ptep);
                pgste = pgste_ipte_notify(mm, address, ptep, pgste);
        }
 
        pte = *ptep;
-       if (!mm_exclusive(mm))
-               __ptep_ipte(address, ptep);
+       ptep_flush_lazy(mm, address, ptep);
        pte_val(*ptep) = _PAGE_INVALID;
 
        if (mm_has_pgste(mm)) {
@@ -1083,15 +1086,13 @@ static inline pte_t ptep_modify_prot_start(struct mm_struct *mm,
        pgste_t pgste;
        pte_t pte;
 
-       mm->context.flush_mm = 1;
        if (mm_has_pgste(mm)) {
                pgste = pgste_get_lock(ptep);
                pgste_ipte_notify(mm, address, ptep, pgste);
        }
 
        pte = *ptep;
-       if (!mm_exclusive(mm))
-               __ptep_ipte(address, ptep);
+       ptep_flush_lazy(mm, address, ptep);
 
        if (mm_has_pgste(mm)) {
                pgste = pgste_update_all(&pte, pgste);
@@ -1160,7 +1161,7 @@ static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
 
        pte = *ptep;
        if (!full)
-               __ptep_ipte(address, ptep);
+               ptep_flush_lazy(mm, address, ptep);
        pte_val(*ptep) = _PAGE_INVALID;
 
        if (!full && mm_has_pgste(mm)) {
@@ -1178,14 +1179,12 @@ static inline pte_t ptep_set_wrprotect(struct mm_struct *mm,
        pte_t pte = *ptep;
 
        if (pte_write(pte)) {
-               mm->context.flush_mm = 1;
                if (mm_has_pgste(mm)) {
                        pgste = pgste_get_lock(ptep);
                        pgste = pgste_ipte_notify(mm, address, ptep, pgste);
                }
 
-               if (!mm_exclusive(mm))
-                       __ptep_ipte(address, ptep);
+               ptep_flush_lazy(mm, address, ptep);
                pte = pte_wrprotect(pte);
 
                if (mm_has_pgste(mm)) {
arch/s390/include/asm/tlb.h
@@ -63,13 +63,14 @@ static inline void tlb_gather_mmu(struct mmu_gather *tlb,
 
 static inline void tlb_flush_mmu(struct mmu_gather *tlb)
 {
+       __tlb_flush_mm_lazy(tlb->mm);
        tlb_table_flush(tlb);
 }
 
 static inline void tlb_finish_mmu(struct mmu_gather *tlb,
                                  unsigned long start, unsigned long end)
 {
-       tlb_table_flush(tlb);
+       tlb_flush_mmu(tlb);
 }
 
 /*
arch/s390/include/asm/tlbflush.h
@@ -86,7 +86,7 @@ static inline void __tlb_flush_mm(struct mm_struct * mm)
                __tlb_flush_full(mm);
 }
 
-static inline void __tlb_flush_mm_cond(struct mm_struct * mm)
+static inline void __tlb_flush_mm_lazy(struct mm_struct * mm)
 {
        if (mm->context.flush_mm) {
                __tlb_flush_mm(mm);
@@ -118,13 +118,13 @@ static inline void __tlb_flush_mm_cond(struct mm_struct * mm)
 
 static inline void flush_tlb_mm(struct mm_struct *mm)
 {
-       __tlb_flush_mm_cond(mm);
+       __tlb_flush_mm_lazy(mm);
 }
 
 static inline void flush_tlb_range(struct vm_area_struct *vma,
                                   unsigned long start, unsigned long end)
 {
-       __tlb_flush_mm_cond(vma->vm_mm);
+       __tlb_flush_mm_lazy(vma->vm_mm);
 }
 
 static inline void flush_tlb_kernel_range(unsigned long start,
arch/s390/mm/pgtable.c
@@ -1008,7 +1008,6 @@ void tlb_table_flush(struct mmu_gather *tlb)
        struct mmu_table_batch **batch = &tlb->batch;
 
        if (*batch) {
-               __tlb_flush_mm(tlb->mm);
                call_rcu_sched(&(*batch)->rcu, tlb_remove_table_rcu);
                *batch = NULL;
        }
@@ -1018,11 +1017,12 @@ void tlb_remove_table(struct mmu_gather *tlb, void *table)
 {
        struct mmu_table_batch **batch = &tlb->batch;
 
+       tlb->mm->context.flush_mm = 1;
        if (*batch == NULL) {
                *batch = (struct mmu_table_batch *)
                        __get_free_page(GFP_NOWAIT | __GFP_NOWARN);
                if (*batch == NULL) {
-                       __tlb_flush_mm(tlb->mm);
+                       __tlb_flush_mm_lazy(tlb->mm);
                        tlb_remove_table_one(table);
                        return;
                }
@@ -1030,7 +1030,7 @@ void tlb_remove_table(struct mmu_gather *tlb, void *table)
        }
        (*batch)->tables[(*batch)->nr++] = table;
        if ((*batch)->nr == MAX_TABLE_BATCH)
-               tlb_table_flush(tlb);
+               tlb_flush_mmu(tlb);
 }
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
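The mmu_gather changes follow the same pattern at a coarser grain: tlb_remove_table now only records that a flush is due, and tlb_flush_mmu performs the pending flush before tlb_table_flush hands the batched page tables back, so one flush can cover many removed tables. Below is a hypothetical mock of that ordering; it flattens the real mmu_table_batch page and RCU machinery into plain counters, which is an assumption made purely for illustration:

#include <stdio.h>

/* Flattened mock of struct mmu_gather (assumption: counters stand in
 * for the real mmu_table_batch page and RCU machinery). */
struct mmu_gather {
        int flush_mm;   /* set by tlb_remove_table(), like mm->context.flush_mm */
        int nr_tables;  /* page tables batched for freeing */
};

static void tlb_table_flush(struct mmu_gather *tlb)
{
        printf("free %d batched page table(s)\n", tlb->nr_tables);
        tlb->nr_tables = 0;
}

static void tlb_flush_mmu(struct mmu_gather *tlb)
{
        /* Flush first, so no CPU can still hold translations that
         * point into a page table we are about to free. */
        if (tlb->flush_mm) {
                printf("full mm TLB flush\n");
                tlb->flush_mm = 0;
        }
        tlb_table_flush(tlb);
}

static void tlb_remove_table(struct mmu_gather *tlb)
{
        tlb->flush_mm = 1;      /* defer; tlb_flush_mmu() will catch up */
        tlb->nr_tables++;
}

int main(void)
{
        struct mmu_gather tlb = { 0, 0 };

        tlb_remove_table(&tlb);
        tlb_remove_table(&tlb);
        tlb_flush_mmu(&tlb);    /* one flush covers both tables */
        return 0;
}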