nilfs2: prepare backing device folios for writing after adding checksums

Patch series "nilfs2: eliminate the call to inode_attach_wb()".

This series eliminates the inode_attach_wb() call from nilfs2, which was
introduced as a workaround for a kernel bug but is suspected of being a
layering violation (and is in fact undesirable, since it exposes a
reference to the backing device).

The inode_attach_wb() call is removed by simply using
mark_buffer_dirty() on the backing device's buffers.  Patch 1/2 prepares
for this to be done safely, and patch 2/2 performs the replacement
itself.


This patch (of 2):

In preparation for having inode_attach_wb(), which is currently called
when attaching the log writer, be done via mark_buffer_dirty() instead,
change the order of preparation for log writing.

Specifically, the function call that adds checksums to segment summary and
super root blocks, which correspond to the log header and trailer, is made
before starting writeback of folios containing those blocks.

The current steps are as follows:

1. Put the folios of segment summary blocks in writeback state.
2. Put the folios of data blocks, metadata file blocks, and btree node
   blocks (collectively called payload blocks) into writeback state.
3. Put the super root block folio in writeback state.
4. Add checksums.

Change these as follows:

1. Put the folios of payload blocks in writeback state.
2. Add checksums.
3. Put the folios of segment summary blocks in writeback state.
4. Put the super root block folio in writeback state.
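
Expressed as a skeleton of the new helper introduced by this patch (a
schematic sketch only; the complete code is in the diff below, and the
numbered comments are not part of the actual source), the new order is:

	static void nilfs_prepare_write_logs(struct list_head *logs, u32 seed)
	{
		/* (1) put payload block folios in writeback state
		 *     (set_buffer_async_write() + nilfs_begin_folio_io())
		 */

		/* (2) add checksums while the segment summary and super
		 *     root blocks are still modifiable
		 */
		nilfs_add_checksums_on_logs(logs, seed);

		/* (3) put segment summary block folios in writeback state */

		/* (4) put the super root block folio in writeback state */
	}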

In this order, the contents of the segment summary blocks and the super
root block, which directly use buffers/folios of the backing device, can
be finalized, including the addition of checksums, before their folios
are prepared for writeback.

Step (1), which puts the payload block folios in writeback state, is
performed first because, if there are memory-mapped data blocks, their
contents are only stabilized once those folios are in writeback state,
and only then can a valid checksum be calculated.
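
For reference, "putting a folio in writeback state" on the backing
device side means the following sequence, which the new helper repeats
for each distinct backing device folio holding segment summary or super
root blocks (excerpted from the diff below; the wrapper name here is
purely illustrative):

	static void nilfs_bd_folio_start_writeback(struct folio *bd_folio)
	{
		folio_lock(bd_folio);
		folio_wait_writeback(bd_folio);	/* wait out any ongoing writeback */
		folio_clear_dirty_for_io(bd_folio);
		folio_start_writeback(bd_folio);
		folio_unlock(bd_folio);
	}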

Link: https://lkml.kernel.org/r/20240610160029.7673-2-konishi.ryusuke@gmail.com
Signed-off-by: Ryusuke Konishi <konishi.ryusuke@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>

--- a/fs/nilfs2/segment.c
+++ b/fs/nilfs2/segment.c
@@ -1639,41 +1639,30 @@ static void nilfs_begin_folio_io(struct folio *folio)
 	folio_unlock(folio);
 }
 
-static void nilfs_segctor_prepare_write(struct nilfs_sc_info *sci)
+/**
+ * nilfs_prepare_write_logs - prepare to write logs
+ * @logs: logs to prepare for writing
+ * @seed: checksum seed value
+ *
+ * nilfs_prepare_write_logs() adds checksums and prepares the block
+ * buffers/folios for writing logs.  In order to stabilize folios of
+ * memory-mapped file blocks by putting them in writeback state before
+ * calculating the checksums, first prepare to write payload blocks other
+ * than segment summary and super root blocks in which the checksums will
+ * be embedded.
+ */
+static void nilfs_prepare_write_logs(struct list_head *logs, u32 seed)
 {
 	struct nilfs_segment_buffer *segbuf;
 	struct folio *bd_folio = NULL, *fs_folio = NULL;
+	struct buffer_head *bh;
 
-	list_for_each_entry(segbuf, &sci->sc_segbufs, sb_list) {
-		struct buffer_head *bh;
-
-		list_for_each_entry(bh, &segbuf->sb_segsum_buffers,
-				    b_assoc_buffers) {
-			if (bh->b_folio != bd_folio) {
-				if (bd_folio) {
-					folio_lock(bd_folio);
-					folio_wait_writeback(bd_folio);
-					folio_clear_dirty_for_io(bd_folio);
-					folio_start_writeback(bd_folio);
-					folio_unlock(bd_folio);
-				}
-				bd_folio = bh->b_folio;
-			}
-		}
-
+	/* Prepare to write payload blocks */
+	list_for_each_entry(segbuf, logs, sb_list) {
 		list_for_each_entry(bh, &segbuf->sb_payload_buffers,
 				    b_assoc_buffers) {
-			if (bh == segbuf->sb_super_root) {
-				if (bh->b_folio != bd_folio) {
-					folio_lock(bd_folio);
-					folio_wait_writeback(bd_folio);
-					folio_clear_dirty_for_io(bd_folio);
-					folio_start_writeback(bd_folio);
-					folio_unlock(bd_folio);
-					bd_folio = bh->b_folio;
-				}
+			if (bh == segbuf->sb_super_root)
 				break;
-			}
 			set_buffer_async_write(bh);
 			if (bh->b_folio != fs_folio) {
 				nilfs_begin_folio_io(fs_folio);
@@ -1681,6 +1670,40 @@ static void nilfs_segctor_prepare_write(struct nilfs_sc_info *sci)
 			}
 		}
 	}
+	nilfs_begin_folio_io(fs_folio);
+
+	nilfs_add_checksums_on_logs(logs, seed);
+
+	/* Prepare to write segment summary blocks */
+	list_for_each_entry(segbuf, logs, sb_list) {
+		list_for_each_entry(bh, &segbuf->sb_segsum_buffers,
+				    b_assoc_buffers) {
+			if (bh->b_folio == bd_folio)
+				continue;
+			if (bd_folio) {
+				folio_lock(bd_folio);
+				folio_wait_writeback(bd_folio);
+				folio_clear_dirty_for_io(bd_folio);
+				folio_start_writeback(bd_folio);
+				folio_unlock(bd_folio);
+			}
+			bd_folio = bh->b_folio;
+		}
+	}
+
+	/* Prepare to write super root block */
+	bh = NILFS_LAST_SEGBUF(logs)->sb_super_root;
+	if (bh) {
+		if (bh->b_folio != bd_folio) {
+			folio_lock(bd_folio);
+			folio_wait_writeback(bd_folio);
+			folio_clear_dirty_for_io(bd_folio);
+			folio_start_writeback(bd_folio);
+			folio_unlock(bd_folio);
+			bd_folio = bh->b_folio;
+		}
+	}
+
 	if (bd_folio) {
 		folio_lock(bd_folio);
 		folio_wait_writeback(bd_folio);
@@ -1688,7 +1711,6 @@ static void nilfs_segctor_prepare_write(struct nilfs_sc_info *sci)
 		folio_start_writeback(bd_folio);
 		folio_unlock(bd_folio);
 	}
-	nilfs_begin_folio_io(fs_folio);
 }
 
 static int nilfs_segctor_write(struct nilfs_sc_info *sci,
@@ -2070,10 +2092,7 @@ static int nilfs_segctor_do_construct(struct nilfs_sc_info *sci, int mode)
 		nilfs_segctor_update_segusage(sci, nilfs->ns_sufile);
 
 		/* Write partial segments */
-		nilfs_segctor_prepare_write(sci);
-
-		nilfs_add_checksums_on_logs(&sci->sc_segbufs,
-					    nilfs->ns_crc_seed);
+		nilfs_prepare_write_logs(&sci->sc_segbufs, nilfs->ns_crc_seed);
 
 		err = nilfs_segctor_write(sci, nilfs);
 		if (unlikely(err))