diff --git a/mm/migrate.c b/mm/migrate.c
index 9a1c5b4a3f99..13b032269242 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -393,28 +393,23 @@ static int folio_expected_refs(struct address_space *mapping,
 }
 
 /*
- * Replace the page in the mapping.
+ * Replace the folio in the mapping.
  *
  * The number of remaining references must be:
- * 1 for anonymous pages without a mapping
- * 2 for pages with a mapping
- * 3 for pages with a mapping and PagePrivate/PagePrivate2 set.
+ * 1 for anonymous folios without a mapping
+ * 2 for folios with a mapping
+ * 3 for folios with a mapping and PagePrivate/PagePrivate2 set.
  */
-int folio_migrate_mapping(struct address_space *mapping,
-		struct folio *newfolio, struct folio *folio, int extra_count)
+static int __folio_migrate_mapping(struct address_space *mapping,
+		struct folio *newfolio, struct folio *folio, int expected_count)
 {
 	XA_STATE(xas, &mapping->i_pages, folio_index(folio));
 	struct zone *oldzone, *newzone;
 	int dirty;
-	int expected_count = folio_expected_refs(mapping, folio) + extra_count;
 	long nr = folio_nr_pages(folio);
 	long entries, i;
 
 	if (!mapping) {
-		/* Anonymous page without mapping */
-		if (folio_ref_count(folio) != expected_count)
-			return -EAGAIN;
-
 		/* Take off deferred split queue while frozen and memcg set */
 		if (folio_test_large(folio) &&
 		    folio_test_large_rmappable(folio)) {
@@ -465,7 +460,7 @@ int folio_migrate_mapping(struct address_space *mapping,
 		entries = 1;
 	}
 
-	/* Move dirty while page refs frozen and newpage not yet exposed */
+	/* Move dirty while folio refs frozen and newfolio not yet exposed */
 	dirty = folio_test_dirty(folio);
 	if (dirty) {
 		folio_clear_dirty(folio);
@@ -479,7 +474,7 @@ int folio_migrate_mapping(struct address_space *mapping,
 	}
 
 	/*
-	 * Drop cache reference from old page by unfreezing
+	 * Drop cache reference from old folio by unfreezing
 	 * to one less reference.
 	 * We know this isn't the last reference.
 	 */
@@ -490,11 +485,11 @@ int folio_migrate_mapping(struct address_space *mapping,
 
 	/*
 	 * If moved to a different zone then also account
-	 * the page for that zone. Other VM counters will be
+	 * the folio for that zone. Other VM counters will be
 	 * taken care of when we establish references to the
-	 * new page and drop references to the old page.
+	 * new folio and drop references to the old folio.
 	 *
-	 * Note that anonymous pages are accounted for
+	 * Note that anonymous folios are accounted for
 	 * via NR_FILE_PAGES and NR_ANON_MAPPED if they
 	 * are mapped to swap space.
 	 */
@@ -534,6 +529,17 @@ int folio_migrate_mapping(struct address_space *mapping,
 
 	return MIGRATEPAGE_SUCCESS;
 }
+
+int folio_migrate_mapping(struct address_space *mapping,
+		struct folio *newfolio, struct folio *folio, int extra_count)
+{
+	int expected_count = folio_expected_refs(mapping, folio) + extra_count;
+
+	if (folio_ref_count(folio) != expected_count)
+		return -EAGAIN;
+
+	return __folio_migrate_mapping(mapping, newfolio, folio, expected_count);
+}
 EXPORT_SYMBOL(folio_migrate_mapping);
 
 /*
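
Note on the 1/2/3 rule in the updated comment: the expected count that the new
folio_migrate_mapping() wrapper computes comes from folio_expected_refs(), the
helper visible in the first hunk header. For reference, a sketch of that helper
as it looks around this version of mm/migrate.c (context only, not part of this
patch):

	/* Sketch of the existing helper, for context; not part of this patch. */
	static int folio_expected_refs(struct address_space *mapping,
			struct folio *folio)
	{
		int refs = 1;			/* "1 for anonymous folios without a mapping" */

		if (!mapping)
			return refs;

		refs += folio_nr_pages(folio);	/* the page cache holds one ref per page */
		if (folio_test_private(folio))	/* "+1" when private data is attached */
			refs++;

		return refs;
	}

The split moves the reference-count check out of the core routine and into the
exported folio_migrate_mapping() wrapper, so the external API is unchanged
while __folio_migrate_mapping(), now static, takes the already-validated
expected_count directly; presumably this lets other mm/migrate.c paths supply
their own expected count without repeating the check.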