fs: Add free_folio address space operation
Include documentation and convert the callers to use ->free_folio as
well as ->freepage.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
parent 6439476311
commit d2329aa0c7
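For filesystem authors, the change is mechanical: the new hook takes a folio
rather than a page. As a rough sketch of what a later per-filesystem
conversion might look like (foofs and its kfree()able private payload are
hypothetical, not part of this commit):

	-static void foofs_freepage(struct page *page)
	+static void foofs_free_folio(struct folio *folio)
	 {
	-	kfree((void *)page_private(page));
	+	kfree(folio_get_private(folio));
	 }

	 static const struct address_space_operations foofs_aops = {
	-	.freepage	= foofs_freepage,
	+	.free_folio	= foofs_free_folio,
	 };

Note that the hunks below call ->free_folio alongside ->freepage, so old and
new implementations both keep working while conversions are in flight.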
--- a/Documentation/filesystems/locking.rst
+++ b/Documentation/filesystems/locking.rst
@@ -250,7 +250,7 @@ prototypes::
 	sector_t (*bmap)(struct address_space *, sector_t);
 	void (*invalidate_folio) (struct folio *, size_t start, size_t len);
 	bool (*release_folio)(struct folio *, gfp_t);
-	void (*freepage)(struct page *);
+	void (*free_folio)(struct folio *);
 	int (*direct_IO)(struct kiocb *, struct iov_iter *iter);
 	bool (*isolate_page) (struct page *, isolate_mode_t);
 	int (*migratepage)(struct address_space *, struct page *, struct page *);
@@ -262,10 +262,10 @@ prototypes::
 	int (*swap_deactivate)(struct file *);
 
 locking rules:
-	All except dirty_folio and freepage may block
+	All except dirty_folio and free_folio may block
 
 ====================== ======================== ========= ===============
-ops                    PageLocked(page)         i_rwsem   invalidate_lock
+ops                    folio locked             i_rwsem   invalidate_lock
 ====================== ======================== ========= ===============
 writepage:             yes, unlocks (see below)
 read_folio:            yes, unlocks                       shared
@@ -277,7 +277,7 @@ write_end:             yes, unlocks              exclusive
 bmap:
 invalidate_folio:      yes                                exclusive
 release_folio:         yes
-freepage:              yes
+free_folio:            yes
 direct_IO:
 isolate_page:          yes
 migratepage:           yes (both)
@@ -377,7 +377,7 @@ buffers from the folio in preparation for freeing it.  It returns false to
 indicate that the buffers are (or may be) freeable.  If ->release_folio is
 NULL, the kernel assumes that the fs has no private interest in the buffers.
 
-->freepage() is called when the kernel is done dropping the page
+->free_folio() is called when the kernel has dropped the folio
 from the page cache.
 
 ->launder_folio() may be called prior to releasing a folio if
--- a/Documentation/filesystems/vfs.rst
+++ b/Documentation/filesystems/vfs.rst
@@ -735,7 +735,7 @@ cache in your filesystem.  The following members are defined:
 	sector_t (*bmap)(struct address_space *, sector_t);
 	void (*invalidate_folio) (struct folio *, size_t start, size_t len);
 	bool (*release_folio)(struct folio *, gfp_t);
-	void (*freepage)(struct page *);
+	void (*free_folio)(struct folio *);
 	ssize_t (*direct_IO)(struct kiocb *, struct iov_iter *iter);
 	/* isolate a page for migration */
 	bool (*isolate_page) (struct page *, isolate_mode_t);
@@ -891,8 +891,8 @@ cache in your filesystem.  The following members are defined:
 	its release_folio will need to ensure this.  Possibly it can
 	clear the uptodate flag if it cannot free private data yet.
 
-``freepage``
-	freepage is called once the page is no longer visible in the
+``free_folio``
+	free_folio is called once the folio is no longer visible in the
 	page cache in order to allow the cleanup of any private data.
 	Since it may be called by the memory reclaimer, it should not
 	assume that the original address_space mapping still exists, and
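The vfs.rst paragraph above is the operative constraint: the folio may
already be disconnected from its mapping by the time this runs, so an
implementation should rely only on state reachable from the folio itself and
must not block. A minimal sketch under those rules, with a hypothetical foofs
that stashes a kmalloc()ed structure in folio->private (names illustrative,
not from this commit):

	/* Assumed per-folio bookkeeping attached via folio_attach_private(). */
	struct foofs_private {
		unsigned long state;
	};

	/*
	 * Illustrative only: everything needed lives in the folio's private
	 * data.  folio->mapping is not touched (it may be stale by now) and
	 * nothing here sleeps, so this is safe to run from the memory
	 * reclaimer.
	 */
	static void foofs_free_folio(struct folio *folio)
	{
		struct foofs_private *priv = folio_get_private(folio);

		kfree(priv);	/* kfree(NULL) is a harmless no-op */
	}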
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -356,6 +356,7 @@ struct address_space_operations {
 	sector_t (*bmap)(struct address_space *, sector_t);
 	void (*invalidate_folio) (struct folio *, size_t offset, size_t len);
 	bool (*release_folio)(struct folio *, gfp_t);
+	void (*free_folio)(struct folio *folio);
 	void (*freepage)(struct page *);
 	ssize_t (*direct_IO)(struct kiocb *, struct iov_iter *iter);
 	/*
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -226,8 +226,12 @@ void __filemap_remove_folio(struct folio *folio, void *shadow)
 void filemap_free_folio(struct address_space *mapping, struct folio *folio)
 {
 	void (*freepage)(struct page *);
+	void (*free_folio)(struct folio *);
 	int refs = 1;
 
+	free_folio = mapping->a_ops->free_folio;
+	if (free_folio)
+		free_folio(folio);
 	freepage = mapping->a_ops->freepage;
 	if (freepage)
 		freepage(&folio->page);
@@ -807,6 +811,7 @@ void replace_page_cache_page(struct page *old, struct page *new)
 	struct folio *fold = page_folio(old);
 	struct folio *fnew = page_folio(new);
 	struct address_space *mapping = old->mapping;
+	void (*free_folio)(struct folio *) = mapping->a_ops->free_folio;
 	void (*freepage)(struct page *) = mapping->a_ops->freepage;
 	pgoff_t offset = old->index;
 	XA_STATE(xas, &mapping->i_pages, offset);
@@ -835,9 +840,11 @@ void replace_page_cache_page(struct page *old, struct page *new)
 	if (PageSwapBacked(new))
 		__inc_lruvec_page_state(new, NR_SHMEM);
 	xas_unlock_irq(&xas);
+	if (free_folio)
+		free_folio(fold);
 	if (freepage)
 		freepage(old);
-	put_page(old);
+	folio_put(fold);
 }
 EXPORT_SYMBOL_GPL(replace_page_cache_page);
 
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1282,8 +1282,10 @@ static int __remove_mapping(struct address_space *mapping, struct folio *folio,
 		xa_unlock_irq(&mapping->i_pages);
 		put_swap_page(&folio->page, swap);
 	} else {
+		void (*free_folio)(struct folio *);
 		void (*freepage)(struct page *);
 
+		free_folio = mapping->a_ops->free_folio;
 		freepage = mapping->a_ops->freepage;
 		/*
 		 * Remember a shadow entry for reclaimed file cache in
@@ -1310,7 +1312,9 @@ static int __remove_mapping(struct address_space *mapping, struct folio *folio,
 			inode_add_lru(mapping->host);
 		spin_unlock(&mapping->host->i_lock);
 
-		if (freepage != NULL)
+		if (free_folio)
+			free_folio(folio);
+		if (freepage)
 			freepage(&folio->page);
 	}
 