mm: fix numa stats for thp migration
Currently the kernel does not correctly update the NUMA stats for
NR_FILE_PAGES and NR_SHMEM on THP migration: the counters are moved
between nodes by a single page rather than by the number of base pages
the THP covers. Fix that.
For NR_FILE_DIRTY and NR_ZONE_WRITE_PENDING there is no need to handle
THP migration at the moment, since the kernel does not yet have write
support for file THPs, but to be more future-proof this patch adds THP
support for those stats as well.
Link: https://lkml.kernel.org/r/20210108155813.2914586-2-shakeelb@google.com
Fixes: e71769ae52 ("mm: enable thp migration for shmem thp")
Signed-off-by: Shakeel Butt <shakeelb@google.com>
Acked-by: Yang Shi <shy828301@gmail.com>
Reviewed-by: Roman Gushchin <guro@fb.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Muchun Song <songmuchun@bytedance.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
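The core of the problem is arithmetic: the old code moved NR_FILE_PAGES and
NR_SHMEM between nodes with __dec_lruvec_state()/__inc_lruvec_state(), i.e.
by one page, while a migrated THP accounts for thp_nr_pages(page) base pages
(HPAGE_PMD_NR, typically 512 with 4K base pages and 2M THPs). The following
stand-alone user-space sketch, with hypothetical names and not kernel code,
simulates the two update schemes for one shmem THP migration to show the
resulting per-node skew.

/*
 * Hypothetical user-space sketch: simulate per-node NR_SHMEM accounting
 * for one shmem THP migrated from node 0 to node 1.  "buggy" mirrors the
 * old __dec/__inc pair (delta of one page); "fixed" mirrors
 * __mod_lruvec_state(..., -nr)/(..., nr) with nr = thp_nr_pages(page).
 */
#include <stdio.h>

#define HPAGE_PMD_NR 512	/* assumed: 2M THP / 4K base pages (x86-64) */

struct node_stats { long nr_shmem; };	/* hypothetical stand-in for node stats */

static void migrate_buggy(struct node_stats *src, struct node_stats *dst)
{
	src->nr_shmem -= 1;	/* old: __dec_lruvec_state(old_lruvec, NR_SHMEM) */
	dst->nr_shmem += 1;	/* old: __inc_lruvec_state(new_lruvec, NR_SHMEM) */
}

static void migrate_fixed(struct node_stats *src, struct node_stats *dst, int nr)
{
	src->nr_shmem -= nr;	/* new: __mod_lruvec_state(old_lruvec, NR_SHMEM, -nr) */
	dst->nr_shmem += nr;	/* new: __mod_lruvec_state(new_lruvec, NR_SHMEM, nr) */
}

int main(void)
{
	struct node_stats node0 = { .nr_shmem = HPAGE_PMD_NR };
	struct node_stats node1 = { .nr_shmem = 0 };

	migrate_buggy(&node0, &node1);
	printf("buggy: node0=%ld node1=%ld (skew of %d pages)\n",
	       node0.nr_shmem, node1.nr_shmem, HPAGE_PMD_NR - 1);

	/* reset and redo the migration with the nr-based update */
	node0.nr_shmem = HPAGE_PMD_NR;
	node1.nr_shmem = 0;
	migrate_fixed(&node0, &node1, HPAGE_PMD_NR);
	printf("fixed: node0=%ld node1=%ld\n", node0.nr_shmem, node1.nr_shmem);

	return 0;
}

With the buggy scheme the source node keeps reporting 511 shmem pages it no
longer holds and the destination reports only 1; the patch removes exactly
that skew.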
commit 5c447d274f
parent 8a8792f600

 mm/migrate.c | 23 ++++++++++++-----------
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -402,6 +402,7 @@ int migrate_page_move_mapping(struct address_space *mapping,
 	struct zone *oldzone, *newzone;
 	int dirty;
 	int expected_count = expected_page_refs(mapping, page) + extra_count;
+	int nr = thp_nr_pages(page);
 
 	if (!mapping) {
 		/* Anonymous page without mapping */
@@ -437,7 +438,7 @@ int migrate_page_move_mapping(struct address_space *mapping,
 	 */
 	newpage->index = page->index;
 	newpage->mapping = page->mapping;
-	page_ref_add(newpage, thp_nr_pages(page)); /* add cache reference */
+	page_ref_add(newpage, nr); /* add cache reference */
 	if (PageSwapBacked(page)) {
 		__SetPageSwapBacked(newpage);
 		if (PageSwapCache(page)) {
@@ -459,7 +460,7 @@ int migrate_page_move_mapping(struct address_space *mapping,
 	if (PageTransHuge(page)) {
 		int i;
 
-		for (i = 1; i < HPAGE_PMD_NR; i++) {
+		for (i = 1; i < nr; i++) {
 			xas_next(&xas);
 			xas_store(&xas, newpage);
 		}
@@ -470,7 +471,7 @@ int migrate_page_move_mapping(struct address_space *mapping,
 	 * to one less reference.
 	 * We know this isn't the last reference.
 	 */
-	page_ref_unfreeze(page, expected_count - thp_nr_pages(page));
+	page_ref_unfreeze(page, expected_count - nr);
 
 	xas_unlock(&xas);
 	/* Leave irq disabled to prevent preemption while updating stats */
@@ -493,17 +494,17 @@ int migrate_page_move_mapping(struct address_space *mapping,
 		old_lruvec = mem_cgroup_lruvec(memcg, oldzone->zone_pgdat);
 		new_lruvec = mem_cgroup_lruvec(memcg, newzone->zone_pgdat);
 
-		__dec_lruvec_state(old_lruvec, NR_FILE_PAGES);
-		__inc_lruvec_state(new_lruvec, NR_FILE_PAGES);
+		__mod_lruvec_state(old_lruvec, NR_FILE_PAGES, -nr);
+		__mod_lruvec_state(new_lruvec, NR_FILE_PAGES, nr);
 		if (PageSwapBacked(page) && !PageSwapCache(page)) {
-			__dec_lruvec_state(old_lruvec, NR_SHMEM);
-			__inc_lruvec_state(new_lruvec, NR_SHMEM);
+			__mod_lruvec_state(old_lruvec, NR_SHMEM, -nr);
+			__mod_lruvec_state(new_lruvec, NR_SHMEM, nr);
 		}
 		if (dirty && mapping_can_writeback(mapping)) {
-			__dec_lruvec_state(old_lruvec, NR_FILE_DIRTY);
-			__dec_zone_state(oldzone, NR_ZONE_WRITE_PENDING);
-			__inc_lruvec_state(new_lruvec, NR_FILE_DIRTY);
-			__inc_zone_state(newzone, NR_ZONE_WRITE_PENDING);
+			__mod_lruvec_state(old_lruvec, NR_FILE_DIRTY, -nr);
+			__mod_zone_page_state(oldzone, NR_ZONE_WRITE_PENDING, -nr);
+			__mod_lruvec_state(new_lruvec, NR_FILE_DIRTY, nr);
+			__mod_zone_page_state(newzone, NR_ZONE_WRITE_PENDING, nr);
 		}
 	}
 	local_irq_enable();
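A note on the shape of the fix: nr is computed once at the top of
migrate_page_move_mapping() via thp_nr_pages(page), which evaluates to 1 for
a base page and to HPAGE_PMD_NR for a PMD-mapped THP, so switching from the
__dec/__inc and __dec_zone_state/__inc_zone_state helpers to
__mod_lruvec_state()/__mod_zone_page_state() keeps a single code path: plain
pages are still adjusted by exactly one, matching the old behaviour, while
THPs are now accounted with their full base-page count.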