mm: re-architect the VM_UNPAGED logic
This replaces the (in my opinion horrible) VM_UNPAGED logic with very explicit support for a "remapped page range" aka VM_PFNMAP. It allows a VM area to contain an arbitrary range of page table entries that the VM never touches, and never considers to be normal pages.

Any user of "remap_pfn_range()" automatically gets this new functionality, and doesn't even have to mark the pages reserved or indeed mark them any other way. It just works. As a side effect, doing mmap() on /dev/mem works for arbitrary ranges.

Sparc update from David in the next commit.

Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 458af5439f
commit 6aab341e0a
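To illustrate what "it just works" means for driver code, here is a hypothetical example (not part of this commit; the mydrv_* names are made up): a character-device mmap() handler only has to call remap_pfn_range(), and the flag and vm_pgoff bookkeeping added below is done for it.

#include <linux/fs.h>
#include <linux/mm.h>

static unsigned long mydrv_buf_phys;	/* assumed: physical address of the buffer, set up elsewhere */

/*
 * Hypothetical driver mmap() handler: map a physically contiguous
 * buffer into userspace.  remap_pfn_range() now marks the vma
 * VM_PFNMAP and records the first PFN in vm_pgoff, so the core VM
 * treats every pte in the range as a raw PFN and never touches
 * refcounts or mapcounts for it.
 */
static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long size = vma->vm_end - vma->vm_start;

	return remap_pfn_range(vma, vma->vm_start,
			       mydrv_buf_phys >> PAGE_SHIFT,
			       size, vma->vm_page_prot);
}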
@@ -145,8 +145,7 @@ static void dump_vdso_pages(struct vm_area_struct * vma)
 			struct page *pg = virt_to_page(vdso32_kbase +
 						       i*PAGE_SIZE);
 			struct page *upg = (vma && vma->vm_mm) ?
-				follow_page(vma->vm_mm, vma->vm_start +
-					    i*PAGE_SIZE, 0)
+				follow_page(vma, vma->vm_start + i*PAGE_SIZE, 0)
 				: NULL;
 			dump_one_vdso_page(pg, upg);
 		}
@@ -157,8 +156,7 @@ static void dump_vdso_pages(struct vm_area_struct * vma)
 			struct page *pg = virt_to_page(vdso64_kbase +
 						       i*PAGE_SIZE);
 			struct page *upg = (vma && vma->vm_mm) ?
-				follow_page(vma->vm_mm, vma->vm_start +
-					    i*PAGE_SIZE, 0)
+				follow_page(vma, vma->vm_start + i*PAGE_SIZE, 0)
 				: NULL;
 			dump_one_vdso_page(pg, upg);
 		}
@@ -591,7 +591,7 @@ static inline size_t read_zero_pagealigned(char __user * buf, size_t size)
 
 		if (vma->vm_start > addr || (vma->vm_flags & VM_WRITE) == 0)
 			goto out_up;
-		if (vma->vm_flags & (VM_SHARED | VM_HUGETLB | VM_UNPAGED))
+		if (vma->vm_flags & (VM_SHARED | VM_HUGETLB))
 			break;
 		count = vma->vm_end - addr;
 		if (count > size)
@@ -402,12 +402,11 @@ struct numa_maps {
 /*
  * Calculate numa node maps for a vma
  */
-static struct numa_maps *get_numa_maps(const struct vm_area_struct *vma)
+static struct numa_maps *get_numa_maps(struct vm_area_struct *vma)
 {
+	int i;
 	struct page *page;
 	unsigned long vaddr;
-	struct mm_struct *mm = vma->vm_mm;
-	int i;
 	struct numa_maps *md = kmalloc(sizeof(struct numa_maps), GFP_KERNEL);
 
 	if (!md)
@@ -420,7 +419,7 @@ static struct numa_maps *get_numa_maps(const struct vm_area_struct *vma)
 		md->node[i] =0;
 
 	for (vaddr = vma->vm_start; vaddr < vma->vm_end; vaddr += PAGE_SIZE) {
-		page = follow_page(mm, vaddr, 0);
+		page = follow_page(vma, vaddr, 0);
 		if (page) {
 			int count = page_mapcount(page);
 
@@ -145,7 +145,7 @@ extern unsigned int kobjsize(const void *objp);
 #define VM_GROWSDOWN	0x00000100	/* general info on the segment */
 #define VM_GROWSUP	0x00000200
 #define VM_SHM		0x00000000	/* Means nothing: delete it later */
-#define VM_UNPAGED	0x00000400	/* Pages managed without map count */
+#define VM_PFNMAP	0x00000400	/* Page-ranges managed without "struct page", just pure PFN */
 #define VM_DENYWRITE	0x00000800	/* ETXTBSY on write attempts.. */
 
 #define VM_EXECUTABLE	0x00001000
@@ -664,6 +664,7 @@ struct zap_details {
 	unsigned long truncate_count;		/* Compare vm_truncate_count */
 };
 
+struct page *vm_normal_page(struct vm_area_struct *, unsigned long, pte_t);
 unsigned long zap_page_range(struct vm_area_struct *vma, unsigned long address,
 		unsigned long size, struct zap_details *);
 unsigned long unmap_vmas(struct mmu_gather **tlb,
@@ -953,7 +954,7 @@ unsigned long vmalloc_to_pfn(void *addr);
 int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
 		unsigned long pfn, unsigned long size, pgprot_t);
 
-struct page *follow_page(struct mm_struct *, unsigned long address,
+struct page *follow_page(struct vm_area_struct *, unsigned long address,
 			unsigned int foll_flags);
 #define FOLL_WRITE	0x01	/* check pte is writable */
 #define FOLL_TOUCH	0x02	/* mark page accessed */
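The new vm_normal_page() declaration above is the interface the rest of the patch converts page-table walkers to. A sketch of the calling pattern, illustrative only and mirroring the call sites changed below (example_walk_one_pte is a made-up name):

/*
 * Illustrative only: the pattern the converted walkers use.
 * vm_normal_page() returns the "struct page" behind a pte, or NULL
 * for a raw PFN entry (or a bad pte), which the walker then skips.
 */
static void example_walk_one_pte(struct vm_area_struct *vma,
				 unsigned long addr, pte_t pte)
{
	struct page *page;

	if (!pte_present(pte))
		return;
	page = vm_normal_page(vma, addr, pte);
	if (!page)
		return;	/* nothing for the VM to manage here */
	/* ... operate on the struct page as usual ... */
}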
 mm/fremap.c | 16

@@ -27,24 +27,20 @@ static int zap_pte(struct mm_struct *mm, struct vm_area_struct *vma,
 	struct page *page = NULL;
 
 	if (pte_present(pte)) {
-		unsigned long pfn = pte_pfn(pte);
-		flush_cache_page(vma, addr, pfn);
+		flush_cache_page(vma, addr, pte_pfn(pte));
 		pte = ptep_clear_flush(vma, addr, ptep);
-		if (unlikely(!pfn_valid(pfn))) {
-			print_bad_pte(vma, pte, addr);
-			goto out;
-		}
-		page = pfn_to_page(pfn);
-		if (pte_dirty(pte))
-			set_page_dirty(page);
-		page_remove_rmap(page);
-		page_cache_release(page);
+		page = vm_normal_page(vma, addr, pte);
+		if (page) {
+			if (pte_dirty(pte))
+				set_page_dirty(page);
+			page_remove_rmap(page);
+			page_cache_release(page);
+		}
 	} else {
 		if (!pte_file(pte))
 			free_swap_and_cache(pte_to_swp_entry(pte));
 		pte_clear(mm, addr, ptep);
 	}
-out:
 	return !!page;
 }
 
@@ -65,8 +61,6 @@ int install_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	pte_t pte_val;
 	spinlock_t *ptl;
 
-	BUG_ON(vma->vm_flags & VM_UNPAGED);
-
 	pgd = pgd_offset(mm, addr);
 	pud = pud_alloc(mm, pgd, addr);
 	if (!pud)
@@ -122,8 +116,6 @@ int install_file_pte(struct mm_struct *mm, struct vm_area_struct *vma,
 	pte_t pte_val;
 	spinlock_t *ptl;
 
-	BUG_ON(vma->vm_flags & VM_UNPAGED);
-
 	pgd = pgd_offset(mm, addr);
 	pud = pud_alloc(mm, pgd, addr);
 	if (!pud)
@@ -126,7 +126,7 @@ static long madvise_dontneed(struct vm_area_struct * vma,
 			     unsigned long start, unsigned long end)
 {
 	*prev = vma;
-	if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_UNPAGED))
+	if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
 		return -EINVAL;
 
 	if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
 mm/memory.c | 181

@@ -333,9 +333,9 @@ static inline void add_mm_rss(struct mm_struct *mm, int file_rss, int anon_rss)
 }
 
 /*
- * This function is called to print an error when a pte in a
- * !VM_UNPAGED region is found pointing to an invalid pfn (which
- * is an error.
+ * This function is called to print an error when a bad pte
+ * is found. For example, we might have a PFN-mapped pte in
+ * a region that doesn't allow it.
  *
  * The calling function must still handle the error.
  */
@@ -350,19 +350,56 @@ void print_bad_pte(struct vm_area_struct *vma, pte_t pte, unsigned long vaddr)
 }
 
 /*
- * page_is_anon applies strict checks for an anonymous page belonging to
- * this vma at this address.  It is used on VM_UNPAGED vmas, which are
- * usually populated with shared originals (which must not be counted),
- * but occasionally contain private COWed copies (when !VM_SHARED, or
- * perhaps via ptrace when VM_SHARED).  An mmap of /dev/mem might window
- * free pages, pages from other processes, or from other parts of this:
- * it's tricky, but try not to be deceived by foreign anonymous pages.
+ * This function gets the "struct page" associated with a pte.
+ *
+ * NOTE! Some mappings do not have "struct pages". A raw PFN mapping
+ * will have each page table entry just pointing to a raw page frame
+ * number, and as far as the VM layer is concerned, those do not have
+ * pages associated with them - even if the PFN might point to memory
+ * that otherwise is perfectly fine and has a "struct page".
+ *
+ * The way we recognize those mappings is through the rules set up
+ * by "remap_pfn_range()": the vma will have the VM_PFNMAP bit set,
+ * and the vm_pgoff will point to the first PFN mapped: thus every
+ * page that is a raw mapping will always honor the rule
+ *
+ *	pfn_of_page == vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT)
+ *
+ * and if that isn't true, the page has been COW'ed (in which case it
+ * _does_ have a "struct page" associated with it even if it is in a
+ * VM_PFNMAP range).
  */
-static inline int page_is_anon(struct page *page,
-			struct vm_area_struct *vma, unsigned long addr)
+struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
 {
-	return page && PageAnon(page) && page_mapped(page) &&
-		page_address_in_vma(page, vma) == addr;
+	unsigned long pfn = pte_pfn(pte);
+
+	if (vma->vm_flags & VM_PFNMAP) {
+		unsigned long off = (addr - vma->vm_start) >> PAGE_SHIFT;
+		if (pfn == vma->vm_pgoff + off)
+			return NULL;
+	}
+
+	/*
+	 * Add some anal sanity checks for now. Eventually,
+	 * we should just do "return pfn_to_page(pfn)", but
+	 * in the meantime we check that we get a valid pfn,
+	 * and that the resulting page looks ok.
+	 *
+	 * Remove this test eventually!
+	 */
+	if (unlikely(!pfn_valid(pfn))) {
+		print_bad_pte(vma, pte, addr);
+		return NULL;
+	}
+
+	/*
+	 * NOTE! We still have PageReserved() pages in the page
+	 * tables.
+	 *
+	 * The PAGE_ZERO() pages and various VDSO mappings can
+	 * cause them to exist.
+	 */
+	return pfn_to_page(pfn);
 }
 
 /*
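The identity rule in the comment above can be restated as a small predicate. A hypothetical helper (not part of the patch; the name is made up) spelling it out:

/*
 * Illustrative restatement of the rule above: inside a VM_PFNMAP vma,
 * a pte whose pfn still matches the linear layout set up by
 * remap_pfn_range() is a raw mapping with no struct page; any other
 * pfn found there must be a private COW'ed copy, which does have one.
 */
static inline int pte_is_raw_pfn_mapping(struct vm_area_struct *vma,
					 unsigned long addr, pte_t pte)
{
	unsigned long off = (addr - vma->vm_start) >> PAGE_SHIFT;

	return (vma->vm_flags & VM_PFNMAP) &&
		pte_pfn(pte) == vma->vm_pgoff + off;
}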
@@ -379,7 +416,6 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 	unsigned long vm_flags = vma->vm_flags;
 	pte_t pte = *src_pte;
 	struct page *page;
-	unsigned long pfn;
 
 	/* pte contains position in swap or file, so copy. */
 	if (unlikely(!pte_present(pte))) {
@@ -397,22 +433,6 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 		goto out_set_pte;
 	}
 
-	pfn = pte_pfn(pte);
-	page = pfn_valid(pfn)? pfn_to_page(pfn): NULL;
-
-	if (unlikely(vm_flags & VM_UNPAGED))
-		if (!page_is_anon(page, vma, addr))
-			goto out_set_pte;
-
-	/*
-	 * If the pte points outside of valid memory but
-	 * the region is not VM_UNPAGED, we have a problem.
-	 */
-	if (unlikely(!page)) {
-		print_bad_pte(vma, pte, addr);
-		goto out_set_pte; /* try to do something sane */
-	}
-
 	/*
 	 * If it's a COW mapping, write protect it both
 	 * in the parent and the child
@@ -429,9 +449,13 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 	if (vm_flags & VM_SHARED)
 		pte = pte_mkclean(pte);
 	pte = pte_mkold(pte);
-	get_page(page);
-	page_dup_rmap(page);
-	rss[!!PageAnon(page)]++;
+
+	page = vm_normal_page(vma, addr, pte);
+	if (page) {
+		get_page(page);
+		page_dup_rmap(page);
+		rss[!!PageAnon(page)]++;
+	}
 
 out_set_pte:
 	set_pte_at(dst_mm, addr, dst_pte, pte);
@@ -543,7 +567,7 @@ int copy_page_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 	 * readonly mappings. The tradeoff is that copy_page_range is more
 	 * efficient than faulting.
 	 */
-	if (!(vma->vm_flags & (VM_HUGETLB|VM_NONLINEAR|VM_UNPAGED))) {
+	if (!(vma->vm_flags & (VM_HUGETLB|VM_NONLINEAR|VM_PFNMAP))) {
 		if (!vma->anon_vma)
 			return 0;
 	}
@@ -584,19 +608,10 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
 		}
 		if (pte_present(ptent)) {
 			struct page *page;
-			unsigned long pfn;
 
 			(*zap_work) -= PAGE_SIZE;
 
-			pfn = pte_pfn(ptent);
-			page = pfn_valid(pfn)? pfn_to_page(pfn): NULL;
-
-			if (unlikely(vma->vm_flags & VM_UNPAGED)) {
-				if (!page_is_anon(page, vma, addr))
-					page = NULL;
-			} else if (unlikely(!page))
-				print_bad_pte(vma, ptent, addr);
-
+			page = vm_normal_page(vma, addr, ptent);
 			if (unlikely(details) && page) {
 				/*
 				 * unmap_shared_mapping_pages() wants to
@@ -852,7 +867,7 @@ unsigned long zap_page_range(struct vm_area_struct *vma, unsigned long address,
 /*
  * Do a quick page-table lookup for a single page.
  */
-struct page *follow_page(struct mm_struct *mm, unsigned long address,
+struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
 			unsigned int flags)
 {
 	pgd_t *pgd;
@@ -860,8 +875,8 @@ struct page *follow_page(struct mm_struct *mm, unsigned long address,
 	pmd_t *pmd;
 	pte_t *ptep, pte;
 	spinlock_t *ptl;
-	unsigned long pfn;
 	struct page *page;
+	struct mm_struct *mm = vma->vm_mm;
 
 	page = follow_huge_addr(mm, address, flags & FOLL_WRITE);
 	if (!IS_ERR(page)) {
@@ -897,11 +912,10 @@ struct page *follow_page(struct mm_struct *mm, unsigned long address,
 		goto unlock;
 	if ((flags & FOLL_WRITE) && !pte_write(pte))
 		goto unlock;
-	pfn = pte_pfn(pte);
-	if (!pfn_valid(pfn))
+	page = vm_normal_page(vma, address, pte);
+	if (unlikely(!page))
 		goto unlock;
 
-	page = pfn_to_page(pfn);
 	if (flags & FOLL_GET)
 		get_page(page);
 	if (flags & FOLL_TOUCH) {
@@ -974,8 +988,10 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 				return i ? : -EFAULT;
 			}
 			if (pages) {
-				pages[i] = pte_page(*pte);
-				get_page(pages[i]);
+				struct page *page = vm_normal_page(vma, start, *pte);
+				pages[i] = page;
+				if (page)
+					get_page(page);
 			}
 			pte_unmap(pte);
 			if (vmas)
@@ -1010,7 +1026,7 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 				foll_flags |= FOLL_WRITE;
 
 			cond_resched();
-			while (!(page = follow_page(mm, start, foll_flags))) {
+			while (!(page = follow_page(vma, start, foll_flags))) {
 				int ret;
 				ret = __handle_mm_fault(mm, vma, start,
 						foll_flags & FOLL_WRITE);
@@ -1214,11 +1230,12 @@ int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
 	 * in 2.6 the LRU scan won't even find its pages, so this
 	 * flag means no more than count its pages in reserved_vm,
 	 * and omit it from core dump, even when VM_IO turned off.
-	 * VM_UNPAGED tells the core MM not to "manage" these pages
-	 * (e.g. refcount, mapcount, try to swap them out): in
-	 * particular, zap_pte_range does not try to free them.
+	 * VM_PFNMAP tells the core MM that the base pages are just
+	 * raw PFN mappings, and do not have a "struct page" associated
+	 * with them.
 	 */
-	vma->vm_flags |= VM_IO | VM_RESERVED | VM_UNPAGED;
+	vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP;
+	vma->vm_pgoff = pfn;
 
 	BUG_ON(addr >= end);
 	pfn -= addr >> PAGE_SHIFT;
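Recording the first PFN in vm_pgoff here is what makes the vm_normal_page() check above work: for the k-th page installed by remap_pfn_range(), addr is vma->vm_start + k*PAGE_SIZE and the installed pfn is the original pfn argument plus k, i.e. vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT), so every pte still in its original place is recognized as a raw mapping and only COW'ed pages fall through to the struct-page path.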
@@ -1273,6 +1290,26 @@ static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
 	return pte;
 }
 
+static inline void cow_user_page(struct page *dst, struct page *src, unsigned long va)
+{
+	/*
+	 * If the source page was a PFN mapping, we don't have
+	 * a "struct page" for it. We do a best-effort copy by
+	 * just copying from the original user address. If that
+	 * fails, we just zero-fill it. Live with it.
+	 */
+	if (unlikely(!src)) {
+		void *kaddr = kmap_atomic(dst, KM_USER0);
+		unsigned long left = __copy_from_user_inatomic(kaddr, (void __user *)va, PAGE_SIZE);
+		if (left)
+			memset(kaddr, 0, PAGE_SIZE);
+		kunmap_atomic(kaddr, KM_USER0);
+		return;
+
+	}
+	copy_user_highpage(dst, src, va);
+}
+
 /*
  * This routine handles present pages, when users try to write
  * to a shared page. It is done by copying the page to a new address
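The NULL-source path matters because do_wp_page() below now passes a src_page that comes from vm_normal_page() and is therefore NULL when the faulting pte was a raw PFN mapping; in that case the data is copied through the user virtual address rather than through a kernel mapping of a source struct page.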
@@ -1296,28 +1333,13 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		spinlock_t *ptl, pte_t orig_pte)
 {
 	struct page *old_page, *src_page, *new_page;
-	unsigned long pfn = pte_pfn(orig_pte);
 	pte_t entry;
 	int ret = VM_FAULT_MINOR;
 
-	if (unlikely(!pfn_valid(pfn))) {
-		/*
-		 * Page table corrupted: show pte and kill process.
-		 * Or it's an attempt to COW an out-of-map VM_UNPAGED
-		 * entry, which copy_user_highpage does not support.
-		 */
-		print_bad_pte(vma, orig_pte, address);
-		ret = VM_FAULT_OOM;
-		goto unlock;
-	}
-	old_page = pfn_to_page(pfn);
+	old_page = vm_normal_page(vma, address, orig_pte);
 	src_page = old_page;
-
-	if (unlikely(vma->vm_flags & VM_UNPAGED))
-		if (!page_is_anon(old_page, vma, address)) {
-			old_page = NULL;
-			goto gotten;
-		}
+	if (!old_page)
+		goto gotten;
 
 	if (PageAnon(old_page) && !TestSetPageLocked(old_page)) {
 		int reuse = can_share_swap_page(old_page);
@@ -1351,7 +1373,7 @@ gotten:
 		new_page = alloc_page_vma(GFP_HIGHUSER, vma, address);
 		if (!new_page)
 			goto oom;
-		copy_user_highpage(new_page, src_page, address);
+		cow_user_page(new_page, src_page, address);
 	}
 
 	/*
@@ -1812,16 +1834,7 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	spinlock_t *ptl;
 	pte_t entry;
 
-	/*
-	 * A VM_UNPAGED vma will normally be filled with present ptes
-	 * by remap_pfn_range, and never arrive here; but it might have
-	 * holes, or if !VM_DONTEXPAND, mremap might have expanded it.
-	 * It's weird enough handling anon pages in unpaged vmas, we do
-	 * not want to worry about ZERO_PAGEs too (it may or may not
-	 * matter if their counts wrap): just give them anon pages.
-	 */
-
-	if (write_access || (vma->vm_flags & VM_UNPAGED)) {
+	if (write_access) {
 		/* Allocate our own private page. */
 		pte_unmap(page_table);
 
@@ -1896,8 +1909,6 @@ static int do_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	int anon = 0;
 
 	pte_unmap(page_table);
-	BUG_ON(vma->vm_flags & VM_UNPAGED);
-
 	if (vma->vm_file) {
 		mapping = vma->vm_file->f_mapping;
 		sequence = mapping->truncate_count;
@@ -1930,7 +1941,7 @@ retry:
 			page = alloc_page_vma(GFP_HIGHUSER, vma, address);
 			if (!page)
 				goto oom;
-			copy_user_highpage(page, new_page, address);
+			cow_user_page(page, new_page, address);
 			page_cache_release(new_page);
 			new_page = page;
 			anon = 1;
@@ -189,17 +189,15 @@ static int check_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
 
 	orig_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
 	do {
-		unsigned long pfn;
+		struct page *page;
 		unsigned int nid;
 
 		if (!pte_present(*pte))
 			continue;
-		pfn = pte_pfn(*pte);
-		if (!pfn_valid(pfn)) {
-			print_bad_pte(vma, *pte, addr);
+		page = vm_normal_page(vma, addr, *pte);
+		if (!page)
 			continue;
-		}
-		nid = pfn_to_nid(pfn);
+		nid = page_to_nid(page);
 		if (!node_isset(nid, *nodes))
 			break;
 	} while (pte++, addr += PAGE_SIZE, addr != end);
@@ -269,8 +267,6 @@ check_range(struct mm_struct *mm, unsigned long start, unsigned long end,
 	first = find_vma(mm, start);
 	if (!first)
 		return ERR_PTR(-EFAULT);
-	if (first->vm_flags & VM_UNPAGED)
-		return ERR_PTR(-EACCES);
 	prev = NULL;
 	for (vma = first; vma && vma->vm_start < end; vma = vma->vm_next) {
 		if (!vma->vm_next && vma->vm_end < end)
 mm/msync.c | 12

@@ -27,7 +27,6 @@ static void msync_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
 again:
 	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
 	do {
-		unsigned long pfn;
 		struct page *page;
 
 		if (progress >= 64) {
@@ -40,13 +39,9 @@ again:
 			continue;
 		if (!pte_maybe_dirty(*pte))
 			continue;
-		pfn = pte_pfn(*pte);
-		if (unlikely(!pfn_valid(pfn))) {
-			print_bad_pte(vma, *pte, addr);
+		page = vm_normal_page(vma, addr, *pte);
+		if (!page)
 			continue;
-		}
-		page = pfn_to_page(pfn);
-
 		if (ptep_clear_flush_dirty(vma, addr, pte) ||
 		    page_test_and_clear_dirty(page))
 			set_page_dirty(page);
@@ -97,9 +92,8 @@ static void msync_page_range(struct vm_area_struct *vma,
 	/* For hugepages we can't go walking the page table normally,
 	 * but that's ok, hugetlbfs is memory based, so we don't need
 	 * to do anything more on an msync().
-	 * Can't do anything with VM_UNPAGED regions either.
 	 */
-	if (vma->vm_flags & (VM_HUGETLB|VM_UNPAGED))
+	if (vma->vm_flags & VM_HUGETLB)
 		return;
 
 	BUG_ON(addr >= end);
@@ -1045,7 +1045,7 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
 
 EXPORT_SYMBOL(find_vma);
 
-struct page *follow_page(struct mm_struct *mm, unsigned long address,
+struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
 			unsigned int foll_flags)
 {
 	return NULL;
 mm/rmap.c | 14

@@ -226,8 +226,6 @@ vma_address(struct page *page, struct vm_area_struct *vma)
 /*
  * At what user virtual address is page expected in vma? checking that the
  * page matches the vma: currently only used on anon pages, by unuse_vma;
- * and by extraordinary checks on anon pages in VM_UNPAGED vmas, taking
- * care that an mmap of /dev/mem might window free and foreign pages.
  */
 unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
 {
@@ -614,7 +612,6 @@ static void try_to_unmap_cluster(unsigned long cursor,
 	struct page *page;
 	unsigned long address;
 	unsigned long end;
-	unsigned long pfn;
 
 	address = (vma->vm_start + cursor) & CLUSTER_MASK;
 	end = address + CLUSTER_SIZE;
@@ -643,15 +640,8 @@ static void try_to_unmap_cluster(unsigned long cursor,
 	for (; address < end; pte++, address += PAGE_SIZE) {
 		if (!pte_present(*pte))
 			continue;
-
-		pfn = pte_pfn(*pte);
-		if (unlikely(!pfn_valid(pfn))) {
-			print_bad_pte(vma, *pte, address);
-			continue;
-		}
-
-		page = pfn_to_page(pfn);
-		BUG_ON(PageAnon(page));
+		page = vm_normal_page(vma, address, *pte);
+		BUG_ON(!page || PageAnon(page));
 
 		if (ptep_clear_flush_young(vma, address, pte))
 			continue;