mm: make __end_folio_writeback() return void
Rather than check the result of test-and-clear, just check that we have the
writeback bit set at the start.  This wouldn't catch every case, but it's
good enough (and enables the next patch).

Link: https://lkml.kernel.org/r/20231004165317.1061855-17-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Albert Ou <aou@eecs.berkeley.edu>
Cc: Alexander Gordeev <agordeev@linux.ibm.com>
Cc: Andreas Dilger <adilger.kernel@dilger.ca>
Cc: Christian Borntraeger <borntraeger@linux.ibm.com>
Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Heiko Carstens <hca@linux.ibm.com>
Cc: Ivan Kokshaysky <ink@jurassic.park.msu.ru>
Cc: Matt Turner <mattst88@gmail.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: Palmer Dabbelt <palmer@dabbelt.com>
Cc: Paul Walmsley <paul.walmsley@sifive.com>
Cc: Richard Henderson <richard.henderson@linaro.org>
Cc: Sven Schnelle <svens@linux.ibm.com>
Cc: "Theodore Ts'o" <tytso@mit.edu>
Cc: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
Cc: Vasily Gorbik <gor@linux.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 0410cd844e
commit 7d0795d098
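For orientation before the diff, here is a minimal, self-contained sketch of the shape of the change described in the commit message. It uses plain C11 atomics as a stand-in for the kernel's folio flag helpers; every name in it is hypothetical and it is not kernel code. The old caller relied on the return value of a test-and-clear and BUG()ed on an unexpected result; the new caller asserts the writeback bit once, up front, and the clearing helper returns nothing.

/* Illustrative sketch only: plain C11 atomics standing in for the kernel's
 * folio flag helpers; every name here is hypothetical. */
#include <assert.h>
#include <stdatomic.h>

#define WRITEBACK_BIT (1u << 0)

/* Old shape: the helper reports whether the bit was actually set. */
static _Bool end_writeback_old(atomic_uint *flags)
{
        return atomic_fetch_and(flags, ~WRITEBACK_BIT) & WRITEBACK_BIT;
}

/* New shape: the helper just clears the bit and returns nothing. */
static void end_writeback_new(atomic_uint *flags)
{
        atomic_fetch_and(flags, ~WRITEBACK_BIT);
}

int main(void)
{
        atomic_uint a = WRITEBACK_BIT, b = WRITEBACK_BIT;

        /* Old caller: BUG() (here: assert) if the test-and-clear result is unexpected. */
        if (!end_writeback_old(&a))
                assert(0);

        /* New caller: assert the precondition once, up front, then clear
         * unconditionally (the analogue of the added VM_BUG_ON_FOLIO()). */
        assert(atomic_load(&b) & WRITEBACK_BIT);
        end_writeback_new(&b);

        return 0;
}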
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1593,9 +1593,15 @@ EXPORT_SYMBOL(folio_wait_private_2_killable);
 /**
  * folio_end_writeback - End writeback against a folio.
  * @folio: The folio.
+ *
+ * The folio must actually be under writeback.
+ *
+ * Context: May be called from process or interrupt context.
  */
 void folio_end_writeback(struct folio *folio)
 {
+        VM_BUG_ON_FOLIO(!folio_test_writeback(folio), folio);
+
         /*
          * folio_test_clear_reclaim() could be used here but it is an
          * atomic operation and overkill in this particular case. Failing
@@ -1615,8 +1621,7 @@ void folio_end_writeback(struct folio *folio)
          * reused before the folio_wake().
          */
         folio_get(folio);
-        if (!__folio_end_writeback(folio))
-                BUG();
+        __folio_end_writeback(folio);
 
         smp_mb__after_atomic();
         folio_wake(folio, PG_writeback);
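The kernel-doc added above spells out the contract that folio_end_writeback() now asserts: the folio must still be under writeback when it is called, and the call may come from interrupt context, typically an I/O completion path. As a hedged illustration (the handler name and error handling here are hypothetical and not taken from the tree; bio_for_each_folio_all(), mapping_set_error() and bio_put() are existing kernel helpers), a write-completion handler might end writeback like this:

#include <linux/bio.h>
#include <linux/pagemap.h>

/* Hypothetical write-completion handler; illustrative sketch only. */
static void example_write_end_io(struct bio *bio)
{
        struct folio_iter fi;

        bio_for_each_folio_all(fi, bio) {
                if (bio->bi_status)
                        mapping_set_error(fi.folio->mapping, -EIO);
                /*
                 * Each folio was marked under writeback before the bio was
                 * submitted, so the new VM_BUG_ON_FOLIO() precondition holds.
                 */
                folio_end_writeback(fi.folio);
        }
        bio_put(bio);
}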
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -105,7 +105,7 @@ static inline void wake_throttle_isolated(pg_data_t *pgdat)
 
 vm_fault_t do_swap_page(struct vm_fault *vmf);
 void folio_rotate_reclaimable(struct folio *folio);
-bool __folio_end_writeback(struct folio *folio);
+void __folio_end_writeback(struct folio *folio);
 void deactivate_file_folio(struct folio *folio);
 void folio_activate(struct folio *folio);
 
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -2940,11 +2940,10 @@ static void wb_inode_writeback_end(struct bdi_writeback *wb)
         spin_unlock_irqrestore(&wb->work_lock, flags);
 }
 
-bool __folio_end_writeback(struct folio *folio)
+void __folio_end_writeback(struct folio *folio)
 {
         long nr = folio_nr_pages(folio);
         struct address_space *mapping = folio_mapping(folio);
-        bool ret;
 
         folio_memcg_lock(folio);
         if (mapping && mapping_use_writeback_tags(mapping)) {
@@ -2953,19 +2952,16 @@ bool __folio_end_writeback(struct folio *folio)
                 unsigned long flags;
 
                 xa_lock_irqsave(&mapping->i_pages, flags);
-                ret = folio_test_clear_writeback(folio);
-                if (ret) {
-                        __xa_clear_mark(&mapping->i_pages, folio_index(folio),
-                                                PAGECACHE_TAG_WRITEBACK);
-                        if (bdi->capabilities & BDI_CAP_WRITEBACK_ACCT) {
-                                struct bdi_writeback *wb = inode_to_wb(inode);
-
-                                wb_stat_mod(wb, WB_WRITEBACK, -nr);
-                                __wb_writeout_add(wb, nr);
-                                if (!mapping_tagged(mapping,
-                                                    PAGECACHE_TAG_WRITEBACK))
-                                        wb_inode_writeback_end(wb);
-                        }
+                folio_test_clear_writeback(folio);
+                __xa_clear_mark(&mapping->i_pages, folio_index(folio),
+                                        PAGECACHE_TAG_WRITEBACK);
+                if (bdi->capabilities & BDI_CAP_WRITEBACK_ACCT) {
+                        struct bdi_writeback *wb = inode_to_wb(inode);
+
+                        wb_stat_mod(wb, WB_WRITEBACK, -nr);
+                        __wb_writeout_add(wb, nr);
+                        if (!mapping_tagged(mapping, PAGECACHE_TAG_WRITEBACK))
+                                wb_inode_writeback_end(wb);
                 }
 
                 if (mapping->host && !mapping_tagged(mapping,
@@ -2974,15 +2970,13 @@ bool __folio_end_writeback(struct folio *folio)
 
                 xa_unlock_irqrestore(&mapping->i_pages, flags);
         } else {
-                ret = folio_test_clear_writeback(folio);
-        }
-        if (ret) {
-                lruvec_stat_mod_folio(folio, NR_WRITEBACK, -nr);
-                zone_stat_mod_folio(folio, NR_ZONE_WRITE_PENDING, -nr);
-                node_stat_mod_folio(folio, NR_WRITTEN, nr);
+                folio_test_clear_writeback(folio);
         }
+
+        lruvec_stat_mod_folio(folio, NR_WRITEBACK, -nr);
+        zone_stat_mod_folio(folio, NR_ZONE_WRITE_PENDING, -nr);
+        node_stat_mod_folio(folio, NR_WRITTEN, nr);
         folio_memcg_unlock(folio);
-        return ret;
 }
 
 bool __folio_start_writeback(struct folio *folio, bool keep_write)