Merge branch 'safe-dirty-tlb-flush'
This merges the patch to fix possible loss of the dirty bit on munmap() or madvise(DONTNEED).

If there are concurrent writers on other CPUs that still have the unmapped/unneeded page in their TLBs, their writes to the page could get lost if a third CPU raced with the TLB flush and did a page_mkclean() before the page was fully written.

Admittedly, if you munmap() or madvise(DONTNEED) an area _while_ another thread is still busy writing to it, you deserve all the lost writes you could get. But we kernel people hold ourselves to higher quality standards than "crazy people deserve to lose", because, well, we've seen people do all kinds of crazy things. So let's get it right, just because we can, and we don't have to worry about it.

* safe-dirty-tlb-flush:
  mm: split 'tlb_flush_mmu()' into tlb flushing and memory freeing parts
commit ac6c9e2bed
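The shape of the change, condensed here into a minimal sketch from the generic mm/memory.c hunks below (tlb_flush(), free_pages_and_swap_cache() and struct mmu_gather are the kernel's own; the comments are editorial and the CONFIG_HAVE_RCU_TABLE_FREE detail is omitted): tlb_flush_mmu() is split so the TLB invalidation can happen early, while freeing the batched pages can be deferred.

/*
 * Minimal sketch of the split, condensed from the mm/memory.c hunks below.
 */
static void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
{
	tlb->need_flush = 0;
	tlb_flush(tlb);			/* invalidate stale TLB entries now ... */
}

static void tlb_flush_mmu_free(struct mmu_gather *tlb)
{
	struct mmu_gather_batch *batch;

	/* ... and hand the batched pages back to the allocator later */
	for (batch = &tlb->local; batch; batch = batch->next) {
		free_pages_and_swap_cache(batch->pages, batch->nr);
		batch->nr = 0;
	}
	tlb->active = &tlb->local;
}

void tlb_flush_mmu(struct mmu_gather *tlb)	/* old behaviour: flush, then free */
{
	if (!tlb->need_flush)
		return;
	tlb_flush_mmu_tlbonly(tlb);
	tlb_flush_mmu_free(tlb);
}

With the split, zap_pte_range() can force the flush via tlb_flush_mmu_tlbonly() before dropping the PTE lock whenever it zapped a dirty PTE, so no other CPU can keep writing through a stale TLB entry behind page_mkclean()'s back, and the more expensive tlb_flush_mmu_free() runs only after the lock is released.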
--- a/arch/arm/include/asm/tlb.h
+++ b/arch/arm/include/asm/tlb.h
@@ -98,15 +98,25 @@ static inline void __tlb_alloc_page(struct mmu_gather *tlb)
 	}
 }
 
-static inline void tlb_flush_mmu(struct mmu_gather *tlb)
+static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
 {
 	tlb_flush(tlb);
+}
+
+static inline void tlb_flush_mmu_free(struct mmu_gather *tlb)
+{
 	free_pages_and_swap_cache(tlb->pages, tlb->nr);
 	tlb->nr = 0;
 	if (tlb->pages == tlb->local)
 		__tlb_alloc_page(tlb);
 }
 
+static inline void tlb_flush_mmu(struct mmu_gather *tlb)
+{
+	tlb_flush_mmu_tlbonly(tlb);
+	tlb_flush_mmu_free(tlb);
+}
+
 static inline void
 tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
 {
--- a/arch/ia64/include/asm/tlb.h
+++ b/arch/ia64/include/asm/tlb.h
@@ -91,18 +91,9 @@ extern struct ia64_tr_entry *ia64_idtrs[NR_CPUS];
 #define RR_RID_MASK	0x00000000ffffff00L
 #define RR_TO_RID(val)	((val >> 8) & 0xffffff)
 
-/*
- * Flush the TLB for address range START to END and, if not in fast mode, release the
- * freed pages that where gathered up to this point.
- */
 static inline void
-ia64_tlb_flush_mmu (struct mmu_gather *tlb, unsigned long start, unsigned long end)
+ia64_tlb_flush_mmu_tlbonly(struct mmu_gather *tlb, unsigned long start, unsigned long end)
 {
-	unsigned long i;
-	unsigned int nr;
-
-	if (!tlb->need_flush)
-		return;
 	tlb->need_flush = 0;
 
 	if (tlb->fullmm) {
@@ -135,6 +126,14 @@ ia64_tlb_flush_mmu (struct mmu_gather *tlb, unsigned long start, unsigned long e
 		flush_tlb_range(&vma, ia64_thash(start), ia64_thash(end));
 	}
 
+}
+
+static inline void
+ia64_tlb_flush_mmu_free(struct mmu_gather *tlb)
+{
+	unsigned long i;
+	unsigned int nr;
+
 	/* lastly, release the freed pages */
 	nr = tlb->nr;
 
@@ -144,6 +143,19 @@ ia64_tlb_flush_mmu (struct mmu_gather *tlb, unsigned long start, unsigned long e
 		free_page_and_swap_cache(tlb->pages[i]);
 	}
 
+/*
+ * Flush the TLB for address range START to END and, if not in fast mode, release the
+ * freed pages that where gathered up to this point.
+ */
+static inline void
+ia64_tlb_flush_mmu (struct mmu_gather *tlb, unsigned long start, unsigned long end)
+{
+	if (!tlb->need_flush)
+		return;
+	ia64_tlb_flush_mmu_tlbonly(tlb, start, end);
+	ia64_tlb_flush_mmu_free(tlb);
+}
+
 static inline void __tlb_alloc_page(struct mmu_gather *tlb)
 {
 	unsigned long addr = __get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0);
@@ -206,6 +218,16 @@ static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
 	return tlb->max - tlb->nr;
 }
 
+static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
+{
+	ia64_tlb_flush_mmu_tlbonly(tlb, tlb->start_addr, tlb->end_addr);
+}
+
+static inline void tlb_flush_mmu_free(struct mmu_gather *tlb)
+{
+	ia64_tlb_flush_mmu_free(tlb);
+}
+
 static inline void tlb_flush_mmu(struct mmu_gather *tlb)
 {
 	ia64_tlb_flush_mmu(tlb, tlb->start_addr, tlb->end_addr);
--- a/arch/s390/include/asm/tlb.h
+++ b/arch/s390/include/asm/tlb.h
@@ -59,12 +59,23 @@ static inline void tlb_gather_mmu(struct mmu_gather *tlb,
 	tlb->batch = NULL;
 }
 
-static inline void tlb_flush_mmu(struct mmu_gather *tlb)
+static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
 {
 	__tlb_flush_mm_lazy(tlb->mm);
+}
+
+static inline void tlb_flush_mmu_free(struct mmu_gather *tlb)
+{
 	tlb_table_flush(tlb);
 }
 
+static inline void tlb_flush_mmu(struct mmu_gather *tlb)
+{
+	tlb_flush_mmu_tlbonly(tlb);
+	tlb_flush_mmu_free(tlb);
+}
+
 static inline void tlb_finish_mmu(struct mmu_gather *tlb,
 				  unsigned long start, unsigned long end)
 {
--- a/arch/sh/include/asm/tlb.h
+++ b/arch/sh/include/asm/tlb.h
@@ -86,6 +86,14 @@ tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
 	}
 }
 
+static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
+{
+}
+
+static inline void tlb_flush_mmu_free(struct mmu_gather *tlb)
+{
+}
+
 static inline void tlb_flush_mmu(struct mmu_gather *tlb)
 {
 }
--- a/arch/um/include/asm/tlb.h
+++ b/arch/um/include/asm/tlb.h
@@ -58,14 +58,26 @@ tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start
 extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
 			       unsigned long end);
 
+static inline void
+tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
+{
+	flush_tlb_mm_range(tlb->mm, tlb->start, tlb->end);
+}
+
+static inline void
+tlb_flush_mmu_free(struct mmu_gather *tlb)
+{
+	init_tlb_gather(tlb);
+}
+
 static inline void
 tlb_flush_mmu(struct mmu_gather *tlb)
 {
 	if (!tlb->need_flush)
 		return;
 
-	flush_tlb_mm_range(tlb->mm, tlb->start, tlb->end);
-	init_tlb_gather(tlb);
+	tlb_flush_mmu_tlbonly(tlb);
+	tlb_flush_mmu_free(tlb);
 }
 
 /* tlb_finish_mmu
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -232,17 +232,18 @@ void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long
 #endif
 }
 
-void tlb_flush_mmu(struct mmu_gather *tlb)
+static void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
 {
-	struct mmu_gather_batch *batch;
-
-	if (!tlb->need_flush)
-		return;
 	tlb->need_flush = 0;
 	tlb_flush(tlb);
 #ifdef CONFIG_HAVE_RCU_TABLE_FREE
 	tlb_table_flush(tlb);
 #endif
+}
+
+static void tlb_flush_mmu_free(struct mmu_gather *tlb)
+{
+	struct mmu_gather_batch *batch;
 
 	for (batch = &tlb->local; batch; batch = batch->next) {
 		free_pages_and_swap_cache(batch->pages, batch->nr);
@@ -251,6 +252,14 @@ void tlb_flush_mmu(struct mmu_gather *tlb)
 	tlb->active = &tlb->local;
 }
 
+void tlb_flush_mmu(struct mmu_gather *tlb)
+{
+	if (!tlb->need_flush)
+		return;
+	tlb_flush_mmu_tlbonly(tlb);
+	tlb_flush_mmu_free(tlb);
+}
+
 /* tlb_finish_mmu
  *	Called at the end of the shootdown operation to free up any resources
  *	that were required.
@@ -1127,8 +1136,10 @@ again:
 			if (PageAnon(page))
 				rss[MM_ANONPAGES]--;
 			else {
-				if (pte_dirty(ptent))
+				if (pte_dirty(ptent)) {
+					force_flush = 1;
 					set_page_dirty(page);
+				}
 				if (pte_young(ptent) &&
 				    likely(!(vma->vm_flags & VM_SEQ_READ)))
 					mark_page_accessed(page);
@@ -1137,9 +1148,10 @@ again:
 			page_remove_rmap(page);
 			if (unlikely(page_mapcount(page) < 0))
 				print_bad_pte(vma, addr, ptent, page);
-			force_flush = !__tlb_remove_page(tlb, page);
-			if (force_flush)
+			if (unlikely(!__tlb_remove_page(tlb, page))) {
+				force_flush = 1;
 				break;
+			}
 			continue;
 		}
 		/*
@@ -1174,18 +1186,11 @@ again:
 
 	add_mm_rss_vec(mm, rss);
 	arch_leave_lazy_mmu_mode();
-	pte_unmap_unlock(start_pte, ptl);
 
-	/*
-	 * mmu_gather ran out of room to batch pages, we break out of
-	 * the PTE lock to avoid doing the potential expensive TLB invalidate
-	 * and page-free while holding it.
-	 */
+	/* Do the actual TLB flush before dropping ptl */
 	if (force_flush) {
 		unsigned long old_end;
 
-		force_flush = 0;
-
 		/*
 		 * Flush the TLB just for the previous segment,
 		 * then update the range to be the remaining
@@ -1193,11 +1198,21 @@ again:
 		 */
 		old_end = tlb->end;
 		tlb->end = addr;
-
-		tlb_flush_mmu(tlb);
-
+		tlb_flush_mmu_tlbonly(tlb);
 		tlb->start = addr;
 		tlb->end = old_end;
+	}
+	pte_unmap_unlock(start_pte, ptl);
+
+	/*
+	 * If we forced a TLB flush (either due to running out of
+	 * batch buffers or because we needed to flush dirty TLB
+	 * entries before releasing the ptl), free the batched
+	 * memory too. Restart if we didn't do everything.
+	 */
+	if (force_flush) {
+		force_flush = 0;
+		tlb_flush_mmu_free(tlb);
 
 		if (addr != end)
 			goto again;