Mirror of https://mirrors.bfsu.edu.cn/git/linux.git, synced 2024-11-27 14:14:24 +08:00
xfs: add xfile_{get,put}_folio
Add helpers similar to xfile_{get,put}_page, but which deal with folios and don't allocate a new folio unless explicitly asked to, and which map to shmem_get_folio instead of calling into the aops.

Signed-off-by: "Darrick J. Wong" <djwong@kernel.org>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Kent Overstreet <kent.overstreet@linux.dev>
Signed-off-by: Chandan Babu R <chandanbabu@kernel.org>
This commit is contained in: commit 6907e3c00a (parent e97d70a573)
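For orientation, a minimal caller sketch (not part of this commit): it assumes the declarations from xfile.h in the diff below, a hypothetical helper name, and that the stored object never crosses a folio boundary, which xfile_get_folio() requires.

/*
 * Hypothetical caller sketch: store len bytes at pos in an xfile,
 * asking xfile_get_folio() to allocate backing store if none exists.
 * Assumes the xfile mapping never hands out highmem folios, so
 * folio_address() is usable.
 */
static int example_xfile_store(struct xfile *xf, loff_t pos, const void *src,
		size_t len)
{
	struct folio	*folio;

	folio = xfile_get_folio(xf, pos, len, XFILE_ALLOC);
	if (IS_ERR(folio))
		return PTR_ERR(folio);
	if (!folio)
		return -ENOMEM;	/* object straddled a folio boundary */

	memcpy(folio_address(folio) + offset_in_folio(folio, pos), src, len);

	xfile_put_folio(xf, folio);
	return 0;
}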
fs/xfs/scrub/trace.h
@@ -908,6 +908,8 @@ DEFINE_XFILE_EVENT(xfile_store);
 DEFINE_XFILE_EVENT(xfile_seek_data);
 DEFINE_XFILE_EVENT(xfile_get_page);
 DEFINE_XFILE_EVENT(xfile_put_page);
+DEFINE_XFILE_EVENT(xfile_get_folio);
+DEFINE_XFILE_EVENT(xfile_put_folio);
 
 TRACE_EVENT(xfarray_create,
 	TP_PROTO(struct xfarray *xfa, unsigned long long required_capacity),
fs/xfs/scrub/xfile.c
@@ -340,3 +340,77 @@ xfile_put_page(
 		return -EIO;
 	return 0;
 }
+
+/*
+ * Grab the (locked) folio for a memory object.  The object cannot span a folio
+ * boundary.  Returns the locked folio if successful, NULL if there was no
+ * folio or it didn't cover the range requested, or an ERR_PTR on failure.
+ */
+struct folio *
+xfile_get_folio(
+	struct xfile		*xf,
+	loff_t			pos,
+	size_t			len,
+	unsigned int		flags)
+{
+	struct inode		*inode = file_inode(xf->file);
+	struct folio		*folio = NULL;
+	unsigned int		pflags;
+	int			error;
+
+	if (inode->i_sb->s_maxbytes - pos < len)
+		return ERR_PTR(-ENOMEM);
+
+	trace_xfile_get_folio(xf, pos, len);
+
+	/*
+	 * Increase the file size first so that shmem_get_folio(..., SGP_CACHE),
+	 * actually allocates a folio instead of erroring out.
+	 */
+	if ((flags & XFILE_ALLOC) && pos + len > i_size_read(inode))
+		i_size_write(inode, pos + len);
+
+	pflags = memalloc_nofs_save();
+	error = shmem_get_folio(inode, pos >> PAGE_SHIFT, &folio,
+			(flags & XFILE_ALLOC) ? SGP_CACHE : SGP_READ);
+	memalloc_nofs_restore(pflags);
+	if (error)
+		return ERR_PTR(error);
+
+	if (!folio)
+		return NULL;
+
+	if (len > folio_size(folio) - offset_in_folio(folio, pos)) {
+		folio_unlock(folio);
+		folio_put(folio);
+		return NULL;
+	}
+
+	if (filemap_check_wb_err(inode->i_mapping, 0)) {
+		folio_unlock(folio);
+		folio_put(folio);
+		return ERR_PTR(-EIO);
+	}
+
+	/*
+	 * Mark the folio dirty so that it won't be reclaimed once we drop the
+	 * (potentially last) reference in xfile_put_folio.
+	 */
+	if (flags & XFILE_ALLOC)
+		folio_set_dirty(folio);
+	return folio;
+}
+
+/*
+ * Release the (locked) folio for a memory object.
+ */
+void
+xfile_put_folio(
+	struct xfile		*xf,
+	struct folio		*folio)
+{
+	trace_xfile_put_folio(xf, folio_pos(folio), folio_size(folio));
+
+	folio_unlock(folio);
+	folio_put(folio);
+}
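To make the three-way return convention above concrete, here is a hedged read-side sketch. It assumes, as the comment above requires, that objects never span a folio boundary, so a NULL return with flags == 0 can only mean the range was never written; the helper name is illustrative.

/* Illustrative only: read len bytes at pos, treating unwritten ranges as zeroes. */
static int example_xfile_load(struct xfile *xf, loff_t pos, void *dst,
		size_t len)
{
	struct folio	*folio;

	folio = xfile_get_folio(xf, pos, len, 0);
	if (IS_ERR(folio))
		return PTR_ERR(folio);

	/* No backing folio and no XFILE_ALLOC: nothing stored, return zeroes. */
	if (!folio) {
		memset(dst, 0, len);
		return 0;
	}

	memcpy(dst, folio_address(folio) + offset_in_folio(folio, pos), len);
	xfile_put_folio(xf, folio);
	return 0;
}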
fs/xfs/scrub/xfile.h
@@ -39,4 +39,11 @@ int xfile_get_page(struct xfile *xf, loff_t offset, unsigned int len,
 		struct xfile_page *xbuf);
 int xfile_put_page(struct xfile *xf, struct xfile_page *xbuf);
 
+#define XFILE_MAX_FOLIO_SIZE	(PAGE_SIZE << MAX_PAGECACHE_ORDER)
+
+#define XFILE_ALLOC		(1 << 0) /* allocate folio if not present */
+struct folio *xfile_get_folio(struct xfile *xf, loff_t offset, size_t len,
+		unsigned int flags);
+void xfile_put_folio(struct xfile *xf, struct folio *folio);
+
 #endif	/* __XFS_SCRUB_XFILE_H__ */
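XFILE_MAX_FOLIO_SIZE bounds how large a single object can be while still fitting in one folio, which xfile_get_folio() requires. A hedged sketch of the kind of sanity check a caller might apply (the helper name is illustrative):

/* Illustrative only: reject object sizes that can never fit in a single folio. */
static inline bool example_xfile_obj_size_ok(size_t len)
{
	return len > 0 && len <= XFILE_MAX_FOLIO_SIZE;
}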