mm/rmap: convert RMAP flags to a proper distinct rmap_t type
We want to pass the flags to more than one anon rmap function, getting
rid of special "do_page_add_anon_rmap()".  So let's pass around a
distinct __bitwise type and refine documentation.

Link: https://lkml.kernel.org/r/20220428083441.37290-6-david@redhat.com
Signed-off-by: David Hildenbrand <david@redhat.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: David Rientjes <rientjes@google.com>
Cc: Don Dutile <ddutile@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Jann Horn <jannh@google.com>
Cc: Jason Gunthorpe <jgg@nvidia.com>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Khalid Aziz <khalid.aziz@oracle.com>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Liang Zhang <zhangliang5@huawei.com>
Cc: "Matthew Wilcox (Oracle)" <willy@infradead.org>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Mike Rapoport <rppt@linux.ibm.com>
Cc: Nadav Amit <namit@vmware.com>
Cc: Oded Gabbay <oded.gabbay@gmail.com>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Pedro Demarchi Gomes <pedrodemargomes@gmail.com>
Cc: Peter Xu <peterx@redhat.com>
Cc: Rik van Riel <riel@surriel.com>
Cc: Roman Gushchin <guro@fb.com>
Cc: Shakeel Butt <shakeelb@google.com>
Cc: Yang Shi <shy828301@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
commit 14f9135d54
parent fb3d824d1a
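Editorial note on the __bitwise annotation used below: under sparse ("make C=2"), a __bitwise typedef produces a restricted integer type that cannot be mixed with plain integers unless a deliberate __force cast is used, which is what turns rmap_t into a genuinely distinct type rather than an alias for int. A minimal sketch of the pattern follows; __bitwise, __force and BIT() are the real kernel annotations/macros, while everything prefixed demo_ is made up purely for illustration:

    /* Sketch only: mirrors the pattern this patch introduces in rmap.h. */
    #include <linux/bits.h>
    #include <linux/types.h>

    typedef int __bitwise demo_flags_t;             /* stand-in for rmap_t */

    #define DEMO_NONE       ((__force demo_flags_t)0)
    #define DEMO_EXCLUSIVE  ((__force demo_flags_t)BIT(0))

    static bool demo_is_exclusive(demo_flags_t flags)
    {
            /* Testing a restricted type against zero is allowed by sparse. */
            return !!(flags & DEMO_EXCLUSIVE);
    }

    static void demo_caller(void)
    {
            demo_is_exclusive(DEMO_EXCLUSIVE);      /* clean */
            demo_is_exclusive(0x1);                 /* sparse: expected restricted demo_flags_t, got int */
    }

With the old "#define RMAP_EXCLUSIVE 0x01" scheme both calls would have passed the checker silently.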
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -160,9 +160,23 @@ static inline void anon_vma_merge(struct vm_area_struct *vma,
 
 struct anon_vma *page_get_anon_vma(struct page *page);
 
-/* bitflags for do_page_add_anon_rmap() */
-#define RMAP_EXCLUSIVE 0x01
-#define RMAP_COMPOUND 0x02
+/* RMAP flags, currently only relevant for some anon rmap operations. */
+typedef int __bitwise rmap_t;
+
+/*
+ * No special request: if the page is a subpage of a compound page, it is
+ * mapped via a PTE. The mapped (sub)page is possibly shared between processes.
+ */
+#define RMAP_NONE		((__force rmap_t)0)
+
+/* The (sub)page is exclusive to a single process. */
+#define RMAP_EXCLUSIVE		((__force rmap_t)BIT(0))
+
+/*
+ * The compound page is not mapped via PTEs, but instead via a single PMD and
+ * should be accounted accordingly.
+ */
+#define RMAP_COMPOUND		((__force rmap_t)BIT(1))
 
 /*
  * rmap interfaces called when adding or removing pte of page
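One detail of the new flag definitions in the hunk above: the __force casts are what allow plain constants such as 0 and BIT(0) to be converted into the restricted type without the checker complaining at the definition site. A two-line sketch (the RMAP_DEMO_* names are invented for illustration only):

    #define RMAP_DEMO_BAD   ((rmap_t)BIT(2))            /* sparse: cast to restricted type rmap_t */
    #define RMAP_DEMO_OK    ((__force rmap_t)BIT(2))    /* __force marks the conversion as intentional */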
@@ -171,7 +185,7 @@ void page_move_anon_rmap(struct page *, struct vm_area_struct *);
 void page_add_anon_rmap(struct page *, struct vm_area_struct *,
 		unsigned long address, bool compound);
 void do_page_add_anon_rmap(struct page *, struct vm_area_struct *,
-		unsigned long address, int flags);
+		unsigned long address, rmap_t flags);
 void page_add_new_anon_rmap(struct page *, struct vm_area_struct *,
 		unsigned long address, bool compound);
 void page_add_file_rmap(struct page *, struct vm_area_struct *,
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3511,10 +3511,10 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
 	struct vm_area_struct *vma = vmf->vma;
 	struct page *page = NULL, *swapcache;
 	struct swap_info_struct *si = NULL;
+	rmap_t rmap_flags = RMAP_NONE;
 	swp_entry_t entry;
 	pte_t pte;
 	int locked;
-	int exclusive = 0;
 	vm_fault_t ret = 0;
 	void *shadow = NULL;
 
@@ -3689,7 +3689,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
 		pte = maybe_mkwrite(pte_mkdirty(pte), vma);
 		vmf->flags &= ~FAULT_FLAG_WRITE;
 		ret |= VM_FAULT_WRITE;
-		exclusive = RMAP_EXCLUSIVE;
+		rmap_flags |= RMAP_EXCLUSIVE;
 	}
 	flush_icache_page(vma, page);
 	if (pte_swp_soft_dirty(vmf->orig_pte))
@@ -3705,7 +3705,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
 		page_add_new_anon_rmap(page, vma, vmf->address, false);
 		lru_cache_add_inactive_or_unevictable(page, vma);
 	} else {
-		do_page_add_anon_rmap(page, vma, vmf->address, exclusive);
+		do_page_add_anon_rmap(page, vma, vmf->address, rmap_flags);
 	}
 
 	set_pte_at(vma->vm_mm, vmf->address, vmf->pte, pte);
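Read together, the three mm/memory.c hunks above replace the old "int exclusive" local with an rmap_t that starts out empty and is accumulated with |= before being handed to do_page_add_anon_rmap(). Heavily condensed, with the unchanged body of do_swap_page() elided and an illustrative name standing in for the real write-fault condition:

    rmap_t rmap_flags = RMAP_NONE;
    ...
    if (can_map_writable) {                 /* illustrative name, not the real check */
            ...
            rmap_flags |= RMAP_EXCLUSIVE;
    }
    ...
    do_page_add_anon_rmap(page, vma, vmf->address, rmap_flags);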
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1181,7 +1181,8 @@ static void __page_check_anon_rmap(struct page *page,
 void page_add_anon_rmap(struct page *page,
 	struct vm_area_struct *vma, unsigned long address, bool compound)
 {
-	do_page_add_anon_rmap(page, vma, address, compound ? RMAP_COMPOUND : 0);
+	do_page_add_anon_rmap(page, vma, address,
+			      compound ? RMAP_COMPOUND : RMAP_NONE);
 }
 
 /*
@@ -1190,7 +1191,7 @@ void page_add_anon_rmap(struct page *page,
  * Everybody else should continue to use page_add_anon_rmap above.
  */
 void do_page_add_anon_rmap(struct page *page,
-	struct vm_area_struct *vma, unsigned long address, int flags)
+	struct vm_area_struct *vma, unsigned long address, rmap_t flags)
 {
 	bool compound = flags & RMAP_COMPOUND;
 	bool first;
@@ -1229,7 +1230,7 @@ void do_page_add_anon_rmap(struct page *page,
 	/* address might be in next vma when migration races vma_adjust */
 	else if (first)
 		__page_set_anon_rmap(page, vma, address,
-				     flags & RMAP_EXCLUSIVE);
+				     !!(flags & RMAP_EXCLUSIVE));
 	else
 		__page_check_anon_rmap(page, vma, address);
 
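The "!!" added in the last hunk is not cosmetic: "flags & RMAP_EXCLUSIVE" now has the restricted rmap_t type and carries the flag's bit value rather than 0/1, while the exclusive argument of __page_set_anon_rmap() is still a plain int at this point in the series. The double negation collapses the mask result to 0 or 1, and because zero-tests on restricted types are permitted by sparse it does so without a warning. A tiny sketch using the new flags (shown with RMAP_COMPOUND, whose raw value is 2, so the normalization is visible):

    rmap_t flags = RMAP_EXCLUSIVE | RMAP_COMPOUND;

    /* The raw mask result is the bit value, not a boolean. */
    int raw  = (__force int)(flags & RMAP_COMPOUND);    /* == 2 */
    int norm = !!(flags & RMAP_COMPOUND);               /* == 1, no sparse warning */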