mirror of
https://mirrors.bfsu.edu.cn/git/linux.git
synced 2024-09-21 20:22:13 +08:00
mm: increase usage of folio_next_index() helper
Simplify code pattern of 'folio->index + folio_nr_pages(folio)' by using the existing helper folio_next_index().

Link: https://lkml.kernel.org/r/20230627174349.491803-1-sidhartha.kumar@oracle.com
Signed-off-by: Sidhartha Kumar <sidhartha.kumar@oracle.com>
Suggested-by: Christoph Hellwig <hch@infradead.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Cc: Andreas Dilger <adilger.kernel@dilger.ca>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: Hugh Dickins <hughd@google.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Theodore Ts'o <tytso@mit.edu>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
parent
3a29280afb
commit
87b11f8622
@@ -1569,7 +1569,7 @@ static void mpage_release_unused_pages(struct mpage_da_data *mpd,
|
|||||||
|
|
||||||
if (folio->index < mpd->first_page)
|
if (folio->index < mpd->first_page)
|
||||||
continue;
|
continue;
|
||||||
if (folio->index + folio_nr_pages(folio) - 1 > end)
|
if (folio_next_index(folio) - 1 > end)
|
||||||
continue;
|
continue;
|
||||||
BUG_ON(!folio_test_locked(folio));
|
BUG_ON(!folio_test_locked(folio));
|
||||||
BUG_ON(folio_test_writeback(folio));
|
BUG_ON(folio_test_writeback(folio));
|
||||||
@@ -2455,7 +2455,7 @@ static int mpage_prepare_extent_to_map(struct mpage_da_data *mpd)
|
|||||||
|
|
||||||
if (mpd->map.m_len == 0)
|
if (mpd->map.m_len == 0)
|
||||||
mpd->first_page = folio->index;
|
mpd->first_page = folio->index;
|
||||||
mpd->next_page = folio->index + folio_nr_pages(folio);
|
mpd->next_page = folio_next_index(folio);
|
||||||
/*
|
/*
|
||||||
* Writeout when we cannot modify metadata is simple.
|
* Writeout when we cannot modify metadata is simple.
|
||||||
* Just submit the page. For data=journal mode we
|
* Just submit the page. For data=journal mode we
|
||||||
|
@@ -2075,7 +2075,7 @@ unsigned find_lock_entries(struct address_space *mapping, pgoff_t *start,
|
|||||||
if (!xa_is_value(folio)) {
|
if (!xa_is_value(folio)) {
|
||||||
if (folio->index < *start)
|
if (folio->index < *start)
|
||||||
goto put;
|
goto put;
|
||||||
if (folio->index + folio_nr_pages(folio) - 1 > end)
|
if (folio_next_index(folio) - 1 > end)
|
||||||
goto put;
|
goto put;
|
||||||
if (!folio_trylock(folio))
|
if (!folio_trylock(folio))
|
||||||
goto put;
|
goto put;
|
||||||
@@ -2174,7 +2174,7 @@ bool folio_more_pages(struct folio *folio, pgoff_t index, pgoff_t max)
|
|||||||
return false;
|
return false;
|
||||||
if (index >= max)
|
if (index >= max)
|
||||||
return false;
|
return false;
|
||||||
return index < folio->index + folio_nr_pages(folio) - 1;
|
return index < folio_next_index(folio) - 1;
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
@@ -2242,7 +2242,7 @@ update_start:
|
|||||||
if (folio_test_hugetlb(folio))
|
if (folio_test_hugetlb(folio))
|
||||||
*start = folio->index + 1;
|
*start = folio->index + 1;
|
||||||
else
|
else
|
||||||
*start = folio->index + folio_nr_pages(folio);
|
*start = folio_next_index(folio);
|
||||||
}
|
}
|
||||||
out:
|
out:
|
||||||
rcu_read_unlock();
|
rcu_read_unlock();
|
||||||
@@ -2359,7 +2359,7 @@ static void filemap_get_read_batch(struct address_space *mapping,
|
|||||||
break;
|
break;
|
||||||
if (folio_test_readahead(folio))
|
if (folio_test_readahead(folio))
|
||||||
break;
|
break;
|
||||||
xas_advance(&xas, folio->index + folio_nr_pages(folio) - 1);
|
xas_advance(&xas, folio_next_index(folio) - 1);
|
||||||
continue;
|
continue;
|
||||||
put_folio:
|
put_folio:
|
||||||
folio_put(folio);
|
folio_put(folio);
|
||||||
|
@@ -3495,7 +3495,7 @@ void unmap_mapping_folio(struct folio *folio)
|
|||||||
VM_BUG_ON(!folio_test_locked(folio));
|
VM_BUG_ON(!folio_test_locked(folio));
|
||||||
|
|
||||||
first_index = folio->index;
|
first_index = folio->index;
|
||||||
last_index = folio->index + folio_nr_pages(folio) - 1;
|
last_index = folio_next_index(folio) - 1;
|
||||||
|
|
||||||
details.even_cows = false;
|
details.even_cows = false;
|
||||||
details.single_folio = folio;
|
details.single_folio = folio;
|
||||||
|
@@ -970,7 +970,7 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
|
|||||||
same_folio = lend < folio_pos(folio) + folio_size(folio);
|
same_folio = lend < folio_pos(folio) + folio_size(folio);
|
||||||
folio_mark_dirty(folio);
|
folio_mark_dirty(folio);
|
||||||
if (!truncate_inode_partial_folio(folio, lstart, lend)) {
|
if (!truncate_inode_partial_folio(folio, lstart, lend)) {
|
||||||
start = folio->index + folio_nr_pages(folio);
|
start = folio_next_index(folio);
|
||||||
if (same_folio)
|
if (same_folio)
|
||||||
end = folio->index;
|
end = folio->index;
|
||||||
}
|
}
|
||||||
|
@@ -378,7 +378,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
|
|||||||
if (!IS_ERR(folio)) {
|
if (!IS_ERR(folio)) {
|
||||||
same_folio = lend < folio_pos(folio) + folio_size(folio);
|
same_folio = lend < folio_pos(folio) + folio_size(folio);
|
||||||
if (!truncate_inode_partial_folio(folio, lstart, lend)) {
|
if (!truncate_inode_partial_folio(folio, lstart, lend)) {
|
||||||
start = folio->index + folio_nr_pages(folio);
|
start = folio_next_index(folio);
|
||||||
if (same_folio)
|
if (same_folio)
|
||||||
end = folio->index;
|
end = folio->index;
|
||||||
}
|
}
|
||||||
|
Loading…
Reference in New Issue
Block a user