mm/rmap: make rmap_walk take an rmap_walk_control argument

Each rmap traversal case differs slightly, so we need function pointers
and arguments to them in order to handle these differences.

For this purpose, struct rmap_walk_control is introduced in this patch,
and it will be extended in a following patch.  Introducing and extending
it are kept separate because that makes the changes clearer.
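
For illustration only (not part of this patch): a caller now packs its
callback and private data into a struct rmap_walk_control and hands that
to rmap_walk().  The names my_rmap_one and my_walk below are made up; the
mm/migrate.c hunk further down is the real in-tree example.

	static int my_rmap_one(struct page *page, struct vm_area_struct *vma,
			       unsigned long addr, void *arg)
	{
		/* per-VMA work goes here; SWAP_AGAIN keeps the walk going */
		return SWAP_AGAIN;
	}

	static void my_walk(struct page *page, void *private)
	{
		struct rmap_walk_control rwc = {
			.rmap_one = my_rmap_one,
			.arg = private,
		};

		rmap_walk(page, &rwc);	/* page must be locked */
	}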

Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Reviewed-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Hugh Dickins <hughd@google.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Hillf Danton <dhillf@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Author: Joonsoo Kim, 2014-01-21 15:49:48 -08:00, committed by Linus Torvalds
commit 051ac83adf (parent faecd8dd85)
5 changed files with 27 additions and 21 deletions

include/linux/ksm.h

@@ -76,8 +76,7 @@ struct page *ksm_might_need_to_copy(struct page *page,
 int page_referenced_ksm(struct page *page,
 			struct mem_cgroup *memcg, unsigned long *vm_flags);
 int try_to_unmap_ksm(struct page *page, enum ttu_flags flags);
-int rmap_walk_ksm(struct page *page, int (*rmap_one)(struct page *,
-		  struct vm_area_struct *, unsigned long, void *), void *arg);
+int rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc);
 void ksm_migrate_page(struct page *newpage, struct page *oldpage);
 
 #else  /* !CONFIG_KSM */
@@ -120,8 +119,8 @@ static inline int try_to_unmap_ksm(struct page *page, enum ttu_flags flags)
 	return 0;
 }
 
-static inline int rmap_walk_ksm(struct page *page, int (*rmap_one)(struct page*,
-		struct vm_area_struct *, unsigned long, void *), void *arg)
+static inline int rmap_walk_ksm(struct page *page,
+		struct rmap_walk_control *rwc)
 {
 	return 0;
 }

include/linux/rmap.h

@@ -235,11 +235,16 @@ struct anon_vma *page_lock_anon_vma_read(struct page *page);
 void page_unlock_anon_vma_read(struct anon_vma *anon_vma);
 int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma);
 
+struct rmap_walk_control {
+	void *arg;
+	int (*rmap_one)(struct page *page, struct vm_area_struct *vma,
+					unsigned long addr, void *arg);
+};
+
 /*
  * Called by migrate.c to remove migration ptes, but might be used more later.
  */
-int rmap_walk(struct page *page, int (*rmap_one)(struct page *,
-		struct vm_area_struct *, unsigned long, void *), void *arg);
+int rmap_walk(struct page *page, struct rmap_walk_control *rwc);
 
 #else	/* !CONFIG_MMU */

mm/ksm.c

@@ -1997,8 +1997,7 @@ out:
 }
 
 #ifdef CONFIG_MIGRATION
-int rmap_walk_ksm(struct page *page, int (*rmap_one)(struct page *,
-		  struct vm_area_struct *, unsigned long, void *), void *arg)
+int rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc)
 {
 	struct stable_node *stable_node;
 	struct rmap_item *rmap_item;
@@ -2033,7 +2032,8 @@ again:
 			if ((rmap_item->mm == vma->vm_mm) == search_new_forks)
 				continue;
 
-			ret = rmap_one(page, vma, rmap_item->address, arg);
+			ret = rwc->rmap_one(page, vma,
+				rmap_item->address, rwc->arg);
 			if (ret != SWAP_AGAIN) {
 				anon_vma_unlock_read(anon_vma);
 				goto out;

mm/migrate.c

@@ -199,7 +199,12 @@ out:
  */
 static void remove_migration_ptes(struct page *old, struct page *new)
 {
-	rmap_walk(new, remove_migration_pte, old);
+	struct rmap_walk_control rwc = {
+		.rmap_one = remove_migration_pte,
+		.arg = old,
+	};
+
+	rmap_walk(new, &rwc);
 }
 
 /*

mm/rmap.c

@@ -1706,8 +1706,7 @@ static struct anon_vma *rmap_walk_anon_lock(struct page *page)
  * rmap_walk() and its helpers rmap_walk_anon() and rmap_walk_file():
  * Called by migrate.c to remove migration ptes, but might be used more later.
  */
-static int rmap_walk_anon(struct page *page, int (*rmap_one)(struct page *,
-		struct vm_area_struct *, unsigned long, void *), void *arg)
+static int rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc)
 {
 	struct anon_vma *anon_vma;
 	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
@@ -1721,7 +1720,7 @@ static int rmap_walk_anon(struct page *page, int (*rmap_one)(struct page *,
 	anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, pgoff, pgoff) {
 		struct vm_area_struct *vma = avc->vma;
 		unsigned long address = vma_address(page, vma);
-		ret = rmap_one(page, vma, address, arg);
+		ret = rwc->rmap_one(page, vma, address, rwc->arg);
 		if (ret != SWAP_AGAIN)
 			break;
 	}
@@ -1729,8 +1728,7 @@ static int rmap_walk_anon(struct page *page, int (*rmap_one)(struct page *,
 	return ret;
 }
 
-static int rmap_walk_file(struct page *page, int (*rmap_one)(struct page *,
-		struct vm_area_struct *, unsigned long, void *), void *arg)
+static int rmap_walk_file(struct page *page, struct rmap_walk_control *rwc)
 {
 	struct address_space *mapping = page->mapping;
 	pgoff_t pgoff = page->index << compound_order(page);
@@ -1742,7 +1740,7 @@ static int rmap_walk_file(struct page *page, int (*rmap_one)(struct page *,
 	mutex_lock(&mapping->i_mmap_mutex);
 	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
 		unsigned long address = vma_address(page, vma);
-		ret = rmap_one(page, vma, address, arg);
+		ret = rwc->rmap_one(page, vma, address, rwc->arg);
 		if (ret != SWAP_AGAIN)
 			break;
 	}
@@ -1755,17 +1753,16 @@ static int rmap_walk_file(struct page *page, int (*rmap_one)(struct page *,
 	return ret;
 }
 
-int rmap_walk(struct page *page, int (*rmap_one)(struct page *,
-		struct vm_area_struct *, unsigned long, void *), void *arg)
+int rmap_walk(struct page *page, struct rmap_walk_control *rwc)
 {
 	VM_BUG_ON(!PageLocked(page));
 
 	if (unlikely(PageKsm(page)))
-		return rmap_walk_ksm(page, rmap_one, arg);
+		return rmap_walk_ksm(page, rwc);
 	else if (PageAnon(page))
-		return rmap_walk_anon(page, rmap_one, arg);
+		return rmap_walk_anon(page, rwc);
 	else
-		return rmap_walk_file(page, rmap_one, arg);
+		return rmap_walk_file(page, rwc);
 }
 #endif /* CONFIG_MIGRATION */