iomap: convert from readpages to readahead

Use the new readahead operation in iomap.  Convert XFS and ZoneFS to use
it.
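
[Editorial note, not part of the patch: the conversion pattern each
filesystem follows is sketched below; "myfs" and myfs_iomap_ops are
hypothetical names standing in for any filesystem built on iomap.]

        /* Before: the list-based ->readpages aop. */
        static int myfs_readpages(struct file *unused,
                        struct address_space *mapping,
                        struct list_head *pages, unsigned nr_pages)
        {
                return iomap_readpages(mapping, pages, nr_pages,
                                &myfs_iomap_ops);
        }

        /* After: ->readahead takes a readahead_control describing pages
         * already in the page cache, and returns void. */
        static void myfs_readahead(struct readahead_control *rac)
        {
                iomap_readahead(rac, &myfs_iomap_ops);
        }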

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Darrick J. Wong <darrick.wong@oracle.com>
Reviewed-by: William Kucharski <william.kucharski@oracle.com>
Cc: Chao Yu <yuchao0@huawei.com>
Cc: Cong Wang <xiyou.wangcong@gmail.com>
Cc: Dave Chinner <dchinner@redhat.com>
Cc: Eric Biggers <ebiggers@google.com>
Cc: Gao Xiang <gaoxiang25@huawei.com>
Cc: Jaegeuk Kim <jaegeuk@kernel.org>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Joseph Qi <joseph.qi@linux.alibaba.com>
Cc: Junxiao Bi <junxiao.bi@oracle.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Zi Yan <ziy@nvidia.com>
Cc: Johannes Thumshirn <johannes.thumshirn@wdc.com>
Cc: Miklos Szeredi <mszeredi@redhat.com>
Link: http://lkml.kernel.org/r/20200414150233.24495-26-willy@infradead.org
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit 9d24a13a93 (parent 76a0294eb1)
Author: Matthew Wilcox (Oracle), 2020-06-01 21:47:34 -07:00; committed by Linus Torvalds
5 changed files with 41 additions and 74 deletions

diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c

@@ -214,9 +214,8 @@ iomap_read_end_io(struct bio *bio)
 struct iomap_readpage_ctx {
         struct page             *cur_page;
         bool                    cur_page_in_bio;
-        bool                    is_readahead;
         struct bio              *bio;
-        struct list_head        *pages;
+        struct readahead_control *rac;
 };
 
 static void
@@ -308,7 +307,7 @@ iomap_readpage_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
                 if (ctx->bio)
                         submit_bio(ctx->bio);
 
-                if (ctx->is_readahead) /* same as readahead_gfp_mask */
+                if (ctx->rac) /* same as readahead_gfp_mask */
                         gfp |= __GFP_NORETRY | __GFP_NOWARN;
                 ctx->bio = bio_alloc(gfp, min(BIO_MAX_PAGES, nr_vecs));
                 /*
@@ -319,7 +318,7 @@ iomap_readpage_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
                 if (!ctx->bio)
                         ctx->bio = bio_alloc(orig_gfp, 1);
                 ctx->bio->bi_opf = REQ_OP_READ;
-                if (ctx->is_readahead)
+                if (ctx->rac)
                         ctx->bio->bi_opf |= REQ_RAHEAD;
                 ctx->bio->bi_iter.bi_sector = sector;
                 bio_set_dev(ctx->bio, iomap->bdev);
@@ -375,36 +374,8 @@ iomap_readpage(struct page *page, const struct iomap_ops *ops)
 }
 EXPORT_SYMBOL_GPL(iomap_readpage);
 
-static struct page *
-iomap_next_page(struct inode *inode, struct list_head *pages, loff_t pos,
-                loff_t length, loff_t *done)
-{
-        while (!list_empty(pages)) {
-                struct page *page = lru_to_page(pages);
-
-                if (page_offset(page) >= (u64)pos + length)
-                        break;
-
-                list_del(&page->lru);
-                if (!add_to_page_cache_lru(page, inode->i_mapping, page->index,
-                                GFP_NOFS))
-                        return page;
-
-                /*
-                 * If we already have a page in the page cache at index we are
-                 * done.  Upper layers don't care if it is uptodate after the
-                 * readpages call itself as every page gets checked again once
-                 * actually needed.
-                 */
-                *done += PAGE_SIZE;
-                put_page(page);
-        }
-
-        return NULL;
-}
-
 static loff_t
-iomap_readpages_actor(struct inode *inode, loff_t pos, loff_t length,
+iomap_readahead_actor(struct inode *inode, loff_t pos, loff_t length,
                 void *data, struct iomap *iomap, struct iomap *srcmap)
 {
         struct iomap_readpage_ctx *ctx = data;
@@ -418,10 +389,7 @@ iomap_readpages_actor(struct inode *inode, loff_t pos, loff_t length,
                         ctx->cur_page = NULL;
                 }
                 if (!ctx->cur_page) {
-                        ctx->cur_page = iomap_next_page(inode, ctx->pages,
-                                        pos, length, &done);
-                        if (!ctx->cur_page)
-                                break;
+                        ctx->cur_page = readahead_page(ctx->rac);
                         ctx->cur_page_in_bio = false;
                 }
                 ret = iomap_readpage_actor(inode, pos + done, length - done,
@@ -431,32 +399,43 @@ iomap_readpages_actor(struct inode *inode, loff_t pos, loff_t length,
         return done;
 }
 
-int
-iomap_readpages(struct address_space *mapping, struct list_head *pages,
-                unsigned nr_pages, const struct iomap_ops *ops)
+/**
+ * iomap_readahead - Attempt to read pages from a file.
+ * @rac: Describes the pages to be read.
+ * @ops: The operations vector for the filesystem.
+ *
+ * This function is for filesystems to call to implement their readahead
+ * address_space operation.
+ *
+ * Context: The @ops callbacks may submit I/O (eg to read the addresses of
+ * blocks from disc), and may wait for it.  The caller may be trying to
+ * access a different page, and so sleeping excessively should be avoided.
+ * It may allocate memory, but should avoid costly allocations.  This
+ * function is called with memalloc_nofs set, so allocations will not cause
+ * the filesystem to be reentered.
+ */
+void iomap_readahead(struct readahead_control *rac, const struct iomap_ops *ops)
 {
+        struct inode *inode = rac->mapping->host;
+        loff_t pos = readahead_pos(rac);
+        loff_t length = readahead_length(rac);
         struct iomap_readpage_ctx ctx = {
-                .pages          = pages,
-                .is_readahead   = true,
+                .rac            = rac,
         };
-        loff_t pos = page_offset(list_entry(pages->prev, struct page, lru));
-        loff_t last = page_offset(list_entry(pages->next, struct page, lru));
-        loff_t length = last - pos + PAGE_SIZE, ret = 0;
 
-        trace_iomap_readpages(mapping->host, nr_pages);
+        trace_iomap_readahead(inode, readahead_count(rac));
 
         while (length > 0) {
-                ret = iomap_apply(mapping->host, pos, length, 0, ops,
-                                &ctx, iomap_readpages_actor);
+                loff_t ret = iomap_apply(inode, pos, length, 0, ops,
+                                &ctx, iomap_readahead_actor);
+
                 if (ret <= 0) {
                         WARN_ON_ONCE(ret == 0);
-                        goto done;
+                        break;
                 }
                 pos += ret;
                 length -= ret;
         }
-        ret = 0;
-done:
+
         if (ctx.bio)
                 submit_bio(ctx.bio);
         if (ctx.cur_page) {
@@ -464,15 +443,8 @@ done:
                 unlock_page(ctx.cur_page);
                 put_page(ctx.cur_page);
         }
-
-        /*
-         * Check that we didn't lose a page due to the arcance calling
-         * conventions..
-         */
-        WARN_ON_ONCE(!ret && !list_empty(ctx.pages));
-        return ret;
 }
-EXPORT_SYMBOL_GPL(iomap_readpages);
+EXPORT_SYMBOL_GPL(iomap_readahead);
 
 /*
  * iomap_is_partially_uptodate checks whether blocks within a page are

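[Editorial note: the readahead_control accessors used above -
readahead_pos(), readahead_length(), readahead_count() and
readahead_page() - come from <linux/pagemap.h>, introduced earlier in
this series.  A minimal sketch of the consumption contract, with a
hypothetical myfs_readahead() and myfs_submit_read() for illustration:]

        static void myfs_readahead(struct readahead_control *rac)
        {
                struct page *page;

                /* Byte range covered by this readahead request. */
                loff_t pos = readahead_pos(rac);
                loff_t length = readahead_length(rac);

                /*
                 * The pages are already locked in the page cache with a
                 * reference held; each readahead_page() call hands one
                 * over, returning NULL when the batch is exhausted.
                 */
                while ((page = readahead_page(rac))) {
                        /* Read the page; on I/O completion the filesystem
                         * does SetPageUptodate(), unlock_page() and
                         * put_page() (iomap does this in its bio end_io
                         * handler). */
                        myfs_submit_read(page, pos, length);
                }
        }
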
diff --git a/fs/iomap/trace.h b/fs/iomap/trace.h

@@ -39,7 +39,7 @@ DEFINE_EVENT(iomap_readpage_class, name,        \
         TP_PROTO(struct inode *inode, int nr_pages), \
         TP_ARGS(inode, nr_pages))
 DEFINE_READPAGE_EVENT(iomap_readpage);
-DEFINE_READPAGE_EVENT(iomap_readpages);
+DEFINE_READPAGE_EVENT(iomap_readahead);
 
 DECLARE_EVENT_CLASS(iomap_range_class,
         TP_PROTO(struct inode *inode, unsigned long off, unsigned int len),

diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c

@@ -621,14 +621,11 @@ xfs_vm_readpage(
         return iomap_readpage(page, &xfs_read_iomap_ops);
 }
 
-STATIC int
-xfs_vm_readpages(
-        struct file             *unused,
-        struct address_space    *mapping,
-        struct list_head        *pages,
-        unsigned                nr_pages)
+STATIC void
+xfs_vm_readahead(
+        struct readahead_control        *rac)
 {
-        return iomap_readpages(mapping, pages, nr_pages, &xfs_read_iomap_ops);
+        iomap_readahead(rac, &xfs_read_iomap_ops);
 }
 
 static int
@@ -644,7 +641,7 @@ xfs_iomap_swapfile_activate(
 
 const struct address_space_operations xfs_address_space_operations = {
         .readpage               = xfs_vm_readpage,
-        .readpages              = xfs_vm_readpages,
+        .readahead              = xfs_vm_readahead,
         .writepage              = xfs_vm_writepage,
         .writepages             = xfs_vm_writepages,
         .set_page_dirty         = iomap_set_page_dirty,

diff --git a/fs/zonefs/super.c b/fs/zonefs/super.c

@@ -78,10 +78,9 @@ static int zonefs_readpage(struct file *unused, struct page *page)
         return iomap_readpage(page, &zonefs_iomap_ops);
 }
 
-static int zonefs_readpages(struct file *unused, struct address_space *mapping,
-                            struct list_head *pages, unsigned int nr_pages)
+static void zonefs_readahead(struct readahead_control *rac)
 {
-        return iomap_readpages(mapping, pages, nr_pages, &zonefs_iomap_ops);
+        iomap_readahead(rac, &zonefs_iomap_ops);
 }
 
 /*
@@ -128,7 +127,7 @@ static int zonefs_writepages(struct address_space *mapping,
 
 static const struct address_space_operations zonefs_file_aops = {
         .readpage               = zonefs_readpage,
-        .readpages              = zonefs_readpages,
+        .readahead              = zonefs_readahead,
         .writepage              = zonefs_writepage,
         .writepages             = zonefs_writepages,
         .set_page_dirty         = iomap_set_page_dirty,

diff --git a/include/linux/iomap.h b/include/linux/iomap.h

@@ -155,8 +155,7 @@ loff_t iomap_apply(struct inode *inode, loff_t pos, loff_t length,
 ssize_t iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *from,
                 const struct iomap_ops *ops);
 int iomap_readpage(struct page *page, const struct iomap_ops *ops);
-int iomap_readpages(struct address_space *mapping, struct list_head *pages,
-                unsigned nr_pages, const struct iomap_ops *ops);
+void iomap_readahead(struct readahead_control *, const struct iomap_ops *ops);
 int iomap_set_page_dirty(struct page *page);
 int iomap_is_partially_uptodate(struct page *page, unsigned long from,
                 unsigned long count);