x86/mm: Fix missed global TLB flush stat
If we take the

	if (end == TLB_FLUSH_ALL || vmflag & VM_HUGETLB) {
		local_flush_tlb();
		goto out;
	}

path out of flush_tlb_mm_range(), we will have flushed the tlb, but not incremented NR_TLB_LOCAL_FLUSH_ALL. This unifies the way out of the function so that we always take a single path when doing a full tlb flush.

Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
Link: http://lkml.kernel.org/r/20140731154056.FF763B76@viggo.jf.intel.com
Acked-by: Rik van Riel <riel@redhat.com>
Acked-by: Mel Gorman <mgorman@suse.de>
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
This commit is contained in:
parent
e9f4e0a9fe
commit
9dfa6dee53
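To see the problem the commit message describes, here is a condensed sketch of the pre-patch flow, reconstructed from the removed (-) lines of the diff below and their unchanged context. It is an illustration, not verbatim kernel source; "..." marks elided code. The early-exit path does a full local flush without touching the counter, while only the ceiling-based path increments NR_TLB_LOCAL_FLUSH_ALL.

	/* Pre-patch shape of flush_tlb_mm_range(), condensed for illustration. */
	void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
				unsigned long end, unsigned long vmflag)
	{
		int need_flush_others_all = 1;
		unsigned long addr;

		preempt_disable();
		/* ... early exits for other mms elided ... */

		if (end == TLB_FLUSH_ALL || vmflag & VM_HUGETLB) {
			local_flush_tlb();	/* full flush... */
			goto out;		/* ...but NR_TLB_LOCAL_FLUSH_ALL is never counted */
		}

		if ((end - start) > tlb_single_page_flush_ceiling * PAGE_SIZE) {
			count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);	/* only counted on this path */
			local_flush_tlb();
		} else {
			need_flush_others_all = 0;
			/* flush range by one by one 'invlpg' */
			for (addr = start; addr < end; addr += PAGE_SIZE) {
				count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE);
				/* ... per-page invlpg elided ... */
			}
		}
	out:
		if (need_flush_others_all) {
			start = 0UL;
			end = TLB_FLUSH_ALL;
		}
		/* ... remote flush and preempt_enable() elided ... */
	}

The actual change follows.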
arch/x86/mm/tlb.c
@@ -164,8 +164,9 @@ unsigned long tlb_single_page_flush_ceiling = 1;
 void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
 				unsigned long end, unsigned long vmflag)
 {
-	int need_flush_others_all = 1;
 	unsigned long addr;
+	/* do a global flush by default */
+	unsigned long base_pages_to_flush = TLB_FLUSH_ALL;
 
 	preempt_disable();
 	if (current->active_mm != mm)
@@ -176,16 +177,14 @@ void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
 		goto out;
 	}
 
-	if (end == TLB_FLUSH_ALL || vmflag & VM_HUGETLB) {
-		local_flush_tlb();
-		goto out;
-	}
+	if ((end != TLB_FLUSH_ALL) && !(vmflag & VM_HUGETLB))
+		base_pages_to_flush = (end - start) >> PAGE_SHIFT;
 
-	if ((end - start) > tlb_single_page_flush_ceiling * PAGE_SIZE) {
+	if (base_pages_to_flush > tlb_single_page_flush_ceiling) {
+		base_pages_to_flush = TLB_FLUSH_ALL;
 		count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
 		local_flush_tlb();
 	} else {
-		need_flush_others_all = 0;
 		/* flush range by one by one 'invlpg' */
 		for (addr = start; addr < end; addr += PAGE_SIZE) {
 			count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE);
@@ -193,7 +192,7 @@ void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
 		}
 	}
 out:
-	if (need_flush_others_all) {
+	if (base_pages_to_flush == TLB_FLUSH_ALL) {
 		start = 0UL;
 		end = TLB_FLUSH_ALL;
 	}
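Putting the hunks together, the post-patch flow looks roughly as follows. This is a condensed sketch assembled from the added (+) lines and the unchanged context above, not verbatim kernel source; "..." marks elided code. Every full local flush now funnels through the one branch that bumps the counter, and base_pages_to_flush doubles as the "flush everything" flag that need_flush_others_all used to provide.

	/* Post-patch shape of flush_tlb_mm_range(), condensed for illustration. */
	void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
				unsigned long end, unsigned long vmflag)
	{
		unsigned long addr;
		/* do a global flush by default */
		unsigned long base_pages_to_flush = TLB_FLUSH_ALL;

		preempt_disable();
		if (current->active_mm != mm)
			goto out;
		/* ... other early exits elided ... */

		/* Only a bounded, non-hugetlb range qualifies for per-page flushing. */
		if ((end != TLB_FLUSH_ALL) && !(vmflag & VM_HUGETLB))
			base_pages_to_flush = (end - start) >> PAGE_SHIFT;

		if (base_pages_to_flush > tlb_single_page_flush_ceiling) {
			/* the single full-flush path: stat can no longer be missed */
			base_pages_to_flush = TLB_FLUSH_ALL;
			count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
			local_flush_tlb();
		} else {
			/* flush range by one by one 'invlpg' */
			for (addr = start; addr < end; addr += PAGE_SIZE) {
				count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE);
				/* ... per-page invlpg elided ... */
			}
		}
	out:
		if (base_pages_to_flush == TLB_FLUSH_ALL) {
			/* widen the range so remote CPUs also do a full flush */
			start = 0UL;
			end = TLB_FLUSH_ALL;
		}
		/* ... remote flush over [start, end) and preempt_enable() elided ... */
	}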