memcg: convert mem_cgroup_swapin_charge_page() to mem_cgroup_swapin_charge_folio()

All callers now have a folio, so pass it in here and remove an unnecessary
call to page_folio().
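
The caller-side change is mechanical. Illustratively (a sketch, not one of
the real call sites converted below; "err" is a stand-in local):

	/* before: the folio had to be converted back into a page */
	err = mem_cgroup_swapin_charge_page(&folio->page, mm, gfp, entry);

	/* after: the folio is passed straight through */
	err = mem_cgroup_swapin_charge_folio(folio, mm, gfp, entry);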

Link: https://lkml.kernel.org/r/20220902194653.1739778-17-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
 4 files changed, 10 insertions(+), 11 deletions(-)

--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h

@@ -688,7 +688,7 @@ static inline int mem_cgroup_charge(struct folio *folio, struct mm_struct *mm,
 	return __mem_cgroup_charge(folio, mm, gfp);
 }
 
-int mem_cgroup_swapin_charge_page(struct page *page, struct mm_struct *mm,
+int mem_cgroup_swapin_charge_folio(struct folio *folio, struct mm_struct *mm,
 		gfp_t gfp, swp_entry_t entry);
 void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry);
@@ -1254,7 +1254,7 @@ static inline int mem_cgroup_charge(struct folio *folio,
 	return 0;
 }
 
-static inline int mem_cgroup_swapin_charge_page(struct page *page,
+static inline int mem_cgroup_swapin_charge_folio(struct folio *folio,
 			struct mm_struct *mm, gfp_t gfp, swp_entry_t entry)
 {
 	return 0;

--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c

@@ -6844,21 +6844,20 @@ int __mem_cgroup_charge(struct folio *folio, struct mm_struct *mm, gfp_t gfp)
 }
 
 /**
- * mem_cgroup_swapin_charge_page - charge a newly allocated page for swapin
- * @page: page to charge
+ * mem_cgroup_swapin_charge_folio - Charge a newly allocated folio for swapin.
+ * @folio: folio to charge.
  * @mm: mm context of the victim
  * @gfp: reclaim mode
- * @entry: swap entry for which the page is allocated
+ * @entry: swap entry for which the folio is allocated
  *
- * This function charges a page allocated for swapin. Please call this before
- * adding the page to the swapcache.
+ * This function charges a folio allocated for swapin. Please call this before
+ * adding the folio to the swapcache.
 *
 * Returns 0 on success. Otherwise, an error code is returned.
 */
-int mem_cgroup_swapin_charge_page(struct page *page, struct mm_struct *mm,
+int mem_cgroup_swapin_charge_folio(struct folio *folio, struct mm_struct *mm,
 			gfp_t gfp, swp_entry_t entry)
 {
-	struct folio *folio = page_folio(page);
 	struct mem_cgroup *memcg;
 	unsigned short id;
 	int ret;

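As the kernel-doc above says, the charge must happen before the folio is
added to the swap cache. A minimal sketch of that ordering, modelled on the
call sites below (error handling abbreviated; assumes the folio-taking
add_to_swap_cache() from this point in the tree):

	__folio_set_locked(folio);
	__folio_set_swapbacked(folio);

	/* Charge the memcg before publishing the folio... */
	if (mem_cgroup_swapin_charge_folio(folio, NULL, gfp_mask, entry))
		goto fail_unlock;

	/* ...and only then insert it into the swap cache. */
	if (add_to_swap_cache(folio, entry, gfp_mask & GFP_RECLAIM_MASK, NULL))
		goto fail_unlock;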
--- a/mm/memory.c
+++ b/mm/memory.c

@@ -3783,7 +3783,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
 				__folio_set_locked(folio);
 				__folio_set_swapbacked(folio);
 
-				if (mem_cgroup_swapin_charge_page(page,
+				if (mem_cgroup_swapin_charge_folio(folio,
 							vma->vm_mm, GFP_KERNEL,
 							entry)) {
 					ret = VM_FAULT_OOM;

--- a/mm/swap_state.c
+++ b/mm/swap_state.c

@@ -480,7 +480,7 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
 	__folio_set_locked(folio);
 	__folio_set_swapbacked(folio);
 
-	if (mem_cgroup_swapin_charge_page(&folio->page, NULL, gfp_mask, entry))
+	if (mem_cgroup_swapin_charge_folio(folio, NULL, gfp_mask, entry))
 		goto fail_unlock;
 
 	/* May fail (-ENOMEM) if XArray node allocation failed. */
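
Any caller that still has only a page can convert at the call site with
page_folio() (hypothetical example; no such caller remains in-tree after
this series, and "err" is again a stand-in local):

	err = mem_cgroup_swapin_charge_folio(page_folio(page), mm,
					     GFP_KERNEL, entry);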