shmem: convert shmem_mfill_atomic_pte() to use a folio

Assert that this is a single-page folio as there are several assumptions
in here that it's exactly PAGE_SIZE bytes large.  Saves several calls to
compound_head() and removes the last caller of shmem_alloc_page().

Link: https://lkml.kernel.org/r/20220902194653.1739778-18-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
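
The pattern this conversion applies, as a condensed free-standing sketch
(not code from the patch: example_resume() is a hypothetical name, and
the shmem accounting, page-cache insertion and error paths are omitted).
A page handed back from an earlier attempt is re-wrapped with
page_folio(), the single-page assumption is asserted, and the folio flag
helpers are used directly:

	/* Hypothetical, condensed illustration -- not from mm/shmem.c. */
	static void example_resume(struct page **pagep)
	{
		struct folio *folio = page_folio(*pagep);

		/* The copy path assumes exactly PAGE_SIZE bytes. */
		VM_BUG_ON_FOLIO(folio_test_large(folio), folio);
		*pagep = NULL;

		/*
		 * The page-based helpers (__SetPageLocked() etc.) each hide
		 * a compound_head() call; the folio variants act on the
		 * folio directly, which is where the savings mentioned in
		 * the changelog come from.
		 */
		__folio_set_locked(folio);
		__folio_set_swapbacked(folio);
		__folio_mark_uptodate(folio);
	}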
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -2374,12 +2374,6 @@ static struct inode *shmem_get_inode(struct super_block *sb, struct inode *dir,
 }
 
 #ifdef CONFIG_USERFAULTFD
-static struct page *shmem_alloc_page(gfp_t gfp,
-			struct shmem_inode_info *info, pgoff_t index)
-{
-	return &shmem_alloc_folio(gfp, info, index)->page;
-}
-
 int shmem_mfill_atomic_pte(struct mm_struct *dst_mm,
 			   pmd_t *dst_pmd,
 			   struct vm_area_struct *dst_vma,
@@ -2395,7 +2389,6 @@ int shmem_mfill_atomic_pte(struct mm_struct *dst_mm,
 	pgoff_t pgoff = linear_page_index(dst_vma, dst_addr);
 	void *page_kaddr;
 	struct folio *folio;
-	struct page *page;
 	int ret;
 	pgoff_t max_off;
 
@@ -2414,53 +2407,53 @@ int shmem_mfill_atomic_pte(struct mm_struct *dst_mm,
 
 	if (!*pagep) {
 		ret = -ENOMEM;
-		page = shmem_alloc_page(gfp, info, pgoff);
-		if (!page)
+		folio = shmem_alloc_folio(gfp, info, pgoff);
+		if (!folio)
 			goto out_unacct_blocks;
 
 		if (!zeropage) {	/* COPY */
-			page_kaddr = kmap_atomic(page);
+			page_kaddr = kmap_local_folio(folio, 0);
 			ret = copy_from_user(page_kaddr,
 					     (const void __user *)src_addr,
 					     PAGE_SIZE);
-			kunmap_atomic(page_kaddr);
+			kunmap_local(page_kaddr);
 
 			/* fallback to copy_from_user outside mmap_lock */
 			if (unlikely(ret)) {
-				*pagep = page;
+				*pagep = &folio->page;
 				ret = -ENOENT;
 				/* don't free the page */
 				goto out_unacct_blocks;
 			}
 
-			flush_dcache_page(page);
+			flush_dcache_folio(folio);
 		} else {		/* ZEROPAGE */
-			clear_user_highpage(page, dst_addr);
+			clear_user_highpage(&folio->page, dst_addr);
 		}
 	} else {
-		page = *pagep;
+		folio = page_folio(*pagep);
+		VM_BUG_ON_FOLIO(folio_test_large(folio), folio);
 		*pagep = NULL;
 	}
 
-	VM_BUG_ON(PageLocked(page));
-	VM_BUG_ON(PageSwapBacked(page));
-	__SetPageLocked(page);
-	__SetPageSwapBacked(page);
-	__SetPageUptodate(page);
+	VM_BUG_ON(folio_test_locked(folio));
+	VM_BUG_ON(folio_test_swapbacked(folio));
+	__folio_set_locked(folio);
+	__folio_set_swapbacked(folio);
+	__folio_mark_uptodate(folio);
 
 	ret = -EFAULT;
 	max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
 	if (unlikely(pgoff >= max_off))
 		goto out_release;
 
-	folio = page_folio(page);
 	ret = shmem_add_to_page_cache(folio, mapping, pgoff, NULL,
 				      gfp & GFP_RECLAIM_MASK, dst_mm);
 	if (ret)
 		goto out_release;
 
 	ret = mfill_atomic_install_pte(dst_mm, dst_pmd, dst_vma, dst_addr,
-				       page, true, wp_copy);
+				       &folio->page, true, wp_copy);
 	if (ret)
 		goto out_delete_from_cache;
 
@@ -2470,13 +2463,13 @@ int shmem_mfill_atomic_pte(struct mm_struct *dst_mm,
 	shmem_recalc_inode(inode);
 	spin_unlock_irq(&info->lock);
 
-	unlock_page(page);
+	folio_unlock(folio);
 	return 0;
 out_delete_from_cache:
-	delete_from_page_cache(page);
+	filemap_remove_folio(folio);
 out_release:
-	unlock_page(page);
-	put_page(page);
+	folio_unlock(folio);
+	folio_put(folio);
 out_unacct_blocks:
 	shmem_inode_unacct_blocks(inode, 1);
 	return ret;
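
For reference, the kmap_atomic() -> kmap_local_folio() switch in
isolation.  A minimal sketch assuming the single-page folio asserted
above; copy_into_folio() is a hypothetical helper, not part of the patch:

	/* Hypothetical helper -- the highmem-mapping change alone. */
	static int copy_into_folio(struct folio *folio, const void __user *src)
	{
		/* Map the first (and, per the assertion, only) page. */
		void *kaddr = kmap_local_folio(folio, 0);
		/* copy_from_user() returns the bytes left uncopied. */
		unsigned long left = copy_from_user(kaddr, src, PAGE_SIZE);

		kunmap_local(kaddr);
		if (left)
			return -EFAULT;
		flush_dcache_folio(folio);
		return 0;
	}

kmap_local_folio() maps one page of the folio at the given byte offset;
the mapping is CPU-local and is undone with kunmap_local() rather than
kunmap_atomic().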