mirror of
https://mirrors.bfsu.edu.cn/git/linux.git
synced 2024-11-11 04:18:39 +08:00
mpage: convert __mpage_writepage() to use a folio more fully
This is just a conversion to the folio API. While there are some nods towards supporting multi-page folios in here, the blocks array is still sized for one page's worth of blocks, and there are other assumptions such as the blocks_per_page variable. [willy@infradead.org: fix accidentally-triggering WARN_ON_ONCE] Link: https://lkml.kernel.org/r/Y9kuaBgXf9lKJ8b0@casper.infradead.org Link: https://lkml.kernel.org/r/20230126201255.1681189-3-willy@infradead.org Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org> Cc: Christoph Hellwig <hch@infradead.org> Cc: Jan Kara <jack@suse.cz> Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
parent
d585bdbeb7
commit
9160cffd45
46
fs/mpage.c
46
fs/mpage.c
@@ -443,13 +443,11 @@ void clean_page_buffers(struct page *page)
|
||||
static int __mpage_writepage(struct folio *folio, struct writeback_control *wbc,
|
||||
void *data)
|
||||
{
|
||||
struct page *page = &folio->page;
|
||||
struct mpage_data *mpd = data;
|
||||
struct bio *bio = mpd->bio;
|
||||
struct address_space *mapping = page->mapping;
|
||||
struct inode *inode = page->mapping->host;
|
||||
struct address_space *mapping = folio->mapping;
|
||||
struct inode *inode = mapping->host;
|
||||
const unsigned blkbits = inode->i_blkbits;
|
||||
unsigned long end_index;
|
||||
const unsigned blocks_per_page = PAGE_SIZE >> blkbits;
|
||||
sector_t last_block;
|
||||
sector_t block_in_file;
|
||||
@@ -460,13 +458,13 @@ static int __mpage_writepage(struct folio *folio, struct writeback_control *wbc,
|
||||
int boundary = 0;
|
||||
sector_t boundary_block = 0;
|
||||
struct block_device *boundary_bdev = NULL;
|
||||
int length;
|
||||
size_t length;
|
||||
struct buffer_head map_bh;
|
||||
loff_t i_size = i_size_read(inode);
|
||||
int ret = 0;
|
||||
struct buffer_head *head = folio_buffers(folio);
|
||||
|
||||
if (page_has_buffers(page)) {
|
||||
struct buffer_head *head = page_buffers(page);
|
||||
if (head) {
|
||||
struct buffer_head *bh = head;
|
||||
|
||||
/* If they're all mapped and dirty, do it */
|
||||
@@ -518,8 +516,8 @@ static int __mpage_writepage(struct folio *folio, struct writeback_control *wbc,
|
||||
/*
|
||||
* The page has no buffers: map it to disk
|
||||
*/
|
||||
BUG_ON(!PageUptodate(page));
|
||||
block_in_file = (sector_t)page->index << (PAGE_SHIFT - blkbits);
|
||||
BUG_ON(!folio_test_uptodate(folio));
|
||||
block_in_file = (sector_t)folio->index << (PAGE_SHIFT - blkbits);
|
||||
/*
|
||||
* Whole page beyond EOF? Skip allocating blocks to avoid leaking
|
||||
* space.
|
||||
@@ -527,7 +525,7 @@ static int __mpage_writepage(struct folio *folio, struct writeback_control *wbc,
|
||||
if (block_in_file >= (i_size + (1 << blkbits) - 1) >> blkbits)
|
||||
goto page_is_mapped;
|
||||
last_block = (i_size - 1) >> blkbits;
|
||||
map_bh.b_page = page;
|
||||
map_bh.b_folio = folio;
|
||||
for (page_block = 0; page_block < blocks_per_page; ) {
|
||||
|
||||
map_bh.b_state = 0;
|
||||
@@ -556,8 +554,11 @@ static int __mpage_writepage(struct folio *folio, struct writeback_control *wbc,
|
||||
first_unmapped = page_block;
|
||||
|
||||
page_is_mapped:
|
||||
end_index = i_size >> PAGE_SHIFT;
|
||||
if (page->index >= end_index) {
|
||||
/* Don't bother writing beyond EOF, truncate will discard the folio */
|
||||
if (folio_pos(folio) >= i_size)
|
||||
goto confused;
|
||||
length = folio_size(folio);
|
||||
if (folio_pos(folio) + length > i_size) {
|
||||
/*
|
||||
* The page straddles i_size. It must be zeroed out on each
|
||||
* and every writepage invocation because it may be mmapped.
|
||||
@@ -566,11 +567,8 @@ page_is_mapped:
|
||||
* is zeroed when mapped, and writes to that region are not
|
||||
* written out to the file."
|
||||
*/
|
||||
unsigned offset = i_size & (PAGE_SIZE - 1);
|
||||
|
||||
if (page->index > end_index || !offset)
|
||||
goto confused;
|
||||
zero_user_segment(page, offset, PAGE_SIZE);
|
||||
length = i_size - folio_pos(folio);
|
||||
folio_zero_segment(folio, length, folio_size(folio));
|
||||
}
|
||||
|
||||
/*
|
||||
@@ -593,18 +591,18 @@ alloc_new:
|
||||
* the confused fail path above (OOM) will be very confused when
|
||||
* it finds all bh marked clean (i.e. it will not write anything)
|
||||
*/
|
||||
wbc_account_cgroup_owner(wbc, page, PAGE_SIZE);
|
||||
wbc_account_cgroup_owner(wbc, &folio->page, folio_size(folio));
|
||||
length = first_unmapped << blkbits;
|
||||
if (bio_add_page(bio, page, length, 0) < length) {
|
||||
if (!bio_add_folio(bio, folio, length, 0)) {
|
||||
bio = mpage_bio_submit(bio);
|
||||
goto alloc_new;
|
||||
}
|
||||
|
||||
clean_buffers(page, first_unmapped);
|
||||
clean_buffers(&folio->page, first_unmapped);
|
||||
|
||||
BUG_ON(PageWriteback(page));
|
||||
set_page_writeback(page);
|
||||
unlock_page(page);
|
||||
BUG_ON(folio_test_writeback(folio));
|
||||
folio_start_writeback(folio);
|
||||
folio_unlock(folio);
|
||||
if (boundary || (first_unmapped != blocks_per_page)) {
|
||||
bio = mpage_bio_submit(bio);
|
||||
if (boundary_block) {
|
||||
@@ -623,7 +621,7 @@ confused:
|
||||
/*
|
||||
* The caller has a ref on the inode, so *mapping is stable
|
||||
*/
|
||||
ret = block_write_full_page(page, mpd->get_block, wbc);
|
||||
ret = block_write_full_page(&folio->page, mpd->get_block, wbc);
|
||||
mapping_set_error(mapping, ret);
|
||||
out:
|
||||
mpd->bio = bio;
|
||||
|
Loading…
Reference in New Issue
Block a user