
staging: erofs: decompress asynchronously if PG_readahead page at first

For the case of nr_to_read == lookahead_size, it is better to
decompress asynchronously as well since no page will be needed immediately.

Reviewed-by: Chao Yu <yuchao0@huawei.com>
Signed-off-by: Gao Xiang <gaoxiang25@huawei.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
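
As a rough illustration of the decision this patch adds (not the in-tree code): if the very first page pulled off the readpages list already carries the PG_readahead mark, the whole batch is pure readahead and no page is needed immediately, so synchronous decompression is dropped. In the sketch below, choose_sync(), struct demo_page and its readahead field are invented names for this example; only the PageReadahead() idea and the sync &= expression come from the patch itself.

/*
 * Illustrative userspace sketch only; none of these names exist in the
 * kernel. head == NULL stands for "this is the first page taken from
 * the readpages list".
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct demo_page {
	bool readahead;		/* stands in for PageReadahead(page) */
};

static bool choose_sync(bool sync, const struct demo_page *page,
			const struct demo_page *head)
{
	/* mirrors the patch: sync &= !(PageReadahead(page) && !head); */
	return sync && !(page->readahead && !head);
}

int main(void)
{
	struct demo_page first = { .readahead = true };

	/* readahead-marked page hit first: prefer async decompression */
	printf("sync = %d\n", choose_sync(true, &first, NULL));
	return 0;
}
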
Authored by Gao Xiang on 2018-11-23 01:21:48 +08:00; committed by Greg Kroah-Hartman
parent 23edf3abe7
commit 2d9b5dcd99


@@ -1345,8 +1345,8 @@ static int z_erofs_vle_normalaccess_readpages(struct file *filp,
 {
 	struct inode *const inode = mapping->host;
 	struct erofs_sb_info *const sbi = EROFS_I_SB(inode);
-	const bool sync = __should_decompress_synchronously(sbi, nr_pages);
+	bool sync = __should_decompress_synchronously(sbi, nr_pages);
 	struct z_erofs_vle_frontend f = VLE_FRONTEND_INIT(inode);
 	gfp_t gfp = mapping_gfp_constraint(mapping, GFP_KERNEL);
 	struct page *head = NULL;
@@ -1364,6 +1364,13 @@ static int z_erofs_vle_normalaccess_readpages(struct file *filp,
 		prefetchw(&page->flags);
 		list_del(&page->lru);
 
+		/*
+		 * A pure asynchronous readahead is indicated if
+		 * a PG_readahead marked page is hitted at first.
+		 * Let's also do asynchronous decompression for this case.
+		 */
+		sync &= !(PageReadahead(page) && !head);
+
 		if (add_to_page_cache_lru(page, mapping, page->index, gfp)) {
 			list_add(&page->lru, &pagepool);
 			continue;