mm: split ->readpages calls to avoid non-contiguous pages lists
That way file systems don't have to check for non-contiguous pages and work around them. It also kicks off I/O earlier, allowing it to finish earlier and reduce latency.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Darrick J. Wong <darrick.wong@oracle.com>
Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
commit b3751e6ab4
parent c534aa3fdd
@@ -140,8 +140,8 @@ out:
 }
 
 /*
- * __do_page_cache_readahead() actually reads a chunk of disk. It allocates all
- * the pages first, then submits them all for I/O. This avoids the very bad
+ * __do_page_cache_readahead() actually reads a chunk of disk. It allocates
+ * the pages first, then submits them for I/O. This avoids the very bad
  * behaviour which would occur if page allocations are causing VM writeback.
  * We really don't want to intermingle reads and writes like that.
  *
@@ -177,8 +177,18 @@ unsigned int __do_page_cache_readahead(struct address_space *mapping,
 		rcu_read_lock();
 		page = radix_tree_lookup(&mapping->i_pages, page_offset);
 		rcu_read_unlock();
-		if (page && !radix_tree_exceptional_entry(page))
+		if (page && !radix_tree_exceptional_entry(page)) {
+			/*
+			 * Page already present? Kick off the current batch of
+			 * contiguous pages before continuing with the next
+			 * batch.
+			 */
+			if (nr_pages)
+				read_pages(mapping, filp, &page_pool, nr_pages,
+						gfp_mask);
+			nr_pages = 0;
+			continue;
+		}
 
 		page = __page_cache_alloc(gfp_mask);
 		if (!page)
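The diff above changes __do_page_cache_readahead() so that a run of contiguous pages is submitted for I/O as soon as an already-cached page is found, rather than accumulating one long, possibly non-contiguous list. Below is a minimal userspace sketch of that batching idea, not the kernel code itself; page_is_cached() and submit_batch() are hypothetical stand-ins for the radix tree lookup and read_pages().

/*
 * Minimal userspace sketch (not kernel code) of the batching idea in this
 * commit: walk the requested range and, whenever a page is already cached,
 * submit the contiguous run collected so far instead of building one long,
 * possibly non-contiguous list.  page_is_cached() and submit_batch() are
 * hypothetical stand-ins for the radix tree lookup and read_pages().
 */
#include <stdbool.h>
#include <stdio.h>

static bool page_is_cached(unsigned long index)
{
	/* Pretend every fourth page is already in the page cache. */
	return (index % 4) == 3;
}

static void submit_batch(unsigned long first, unsigned long nr)
{
	if (nr)
		printf("submit %lu contiguous pages starting at index %lu\n",
		       nr, first);
}

int main(void)
{
	unsigned long start = 0, count = 16;
	unsigned long batch_start = start, nr_pages = 0;

	for (unsigned long idx = start; idx < start + count; idx++) {
		if (page_is_cached(idx)) {
			/* A cached page ends the run: kick off I/O now. */
			submit_batch(batch_start, nr_pages);
			nr_pages = 0;
			batch_start = idx + 1;
			continue;
		}
		nr_pages++;
	}
	submit_batch(batch_start, nr_pages);	/* trailing run, if any */
	return 0;
}

Each run handed to submit_batch() is contiguous, so the consumer never has to detect and work around gaps, and I/O for earlier runs can start before later pages are even looked at.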