Mirror of https://mirrors.bfsu.edu.cn/git/linux.git, synced 2024-11-28 14:44:10 +08:00
3d078efae6
[BUG]
There is a very rare ASSERT() triggering during full fstests runs for
subpage rw support. No other reproducer so far.

The ASSERT() gets triggered for metadata read in
btrfs_page_set_uptodate() inside end_page_read().

[CAUSE]
There is still a small race window for metadata only, the race could
happen like this:

                T1                  |              T2
------------------------------------+-----------------------------
end_bio_extent_readpage()           |
|- btrfs_validate_metadata_buffer() |
|  |- free_extent_buffer()          |
|     Still have 2 refs             |
|- end_page_read()                  |
   |- if (unlikely(PagePrivate())   |
   |  The page still has Private    |
   |                                | free_extent_buffer()
   |                                | |  Only one ref 1, will be
   |                                | |  released
   |                                | |- detach_extent_buffer_page()
   |                                |    |- btrfs_detach_subpage()
   |- btrfs_set_page_uptodate()     |
      The page no longer has Private|
      >>> ASSERT() triggered <<<    |

This race window is super small, thus pretty hard to hit, even with so
many runs of fstests.

But the race window is still there, so we have to go another way to
solve it other than relying on a racy PagePrivate() check.

The data path is not affected, as it locks the page before reading and
unlocks the page after the last read has finished, thus there is no
race window.

[FIX]
This patch will fix the bug by repurposing btrfs_subpage::readers.

Now btrfs_subpage::readers will be a member shared by both metadata and
data.

For the metadata path, we don't do the page unlock, as metadata only
relies on extent locking.

At the same time, teach page_range_has_eb() to take
btrfs_subpage::readers into consideration, so that even if the last eb
of a page gets freed, page::private won't be detached as long as there
are still pending end_page_read() calls.

By this we eliminate the race window. This will slightly increase the
metadata memory usage, as the page may not be released as frequently as
usual, but it should not be a big deal.

The code got introduced in ("btrfs: submit read time repair only for
each corrupted sector"), but the fix is in a separate patch to keep the
problem description together; the crash is rare, so it should not hurt
bisectability.

Signed-off-by: Qu Wenruo <wqu@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
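To illustrate the [FIX] above: the eb release path must treat pending end_page_read() calls like live extent buffer references before detaching page::private. Below is a minimal sketch of the check page_range_has_eb() needs to perform; the function name (page_range_has_eb_sketch) and the body are written here for illustration only and are not copied from the kernel tree.

/* Sketch only: returns true if page::private must stay attached. */
static bool page_range_has_eb_sketch(struct page *page)
{
	struct btrfs_subpage *subpage;

	if (PagePrivate(page)) {
		subpage = (struct btrfs_subpage *)page->private;
		/* A live extent buffer still points at this page */
		if (atomic_read(&subpage->eb_refs))
			return true;
		/*
		 * Even if the last eb is freed, a pending end_page_read()
		 * still needs page::private, so keep it attached.
		 */
		if (atomic_read(&subpage->readers))
			return true;
	}
	return false;
}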
130 lines
4.5 KiB
C
/* SPDX-License-Identifier: GPL-2.0 */

#ifndef BTRFS_SUBPAGE_H
#define BTRFS_SUBPAGE_H

#include <linux/spinlock.h>

/*
 * Maximum page size we support is 64K, minimum sector size is 4K, u16 bitmap
 * is sufficient. Regular bitmap_* is not used due to size reasons.
 */
#define BTRFS_SUBPAGE_BITMAP_SIZE	16

/*
 * Structure to trace status of each sector inside a page, attached to
 * page::private for both data and metadata inodes.
 */
struct btrfs_subpage {
	/* Common members for both data and metadata pages */
	spinlock_t lock;
	u16 uptodate_bitmap;
	u16 error_bitmap;
	u16 dirty_bitmap;
	u16 writeback_bitmap;
	/*
	 * Both data and metadata need to track how many readers are on the
	 * page.
	 * Data relies on @readers to unlock the page when the last reader
	 * finishes.
	 * While metadata doesn't need the page unlock, it needs to prevent
	 * page::private from being cleared before the last end_page_read().
	 */
	atomic_t readers;
	union {
		/*
		 * Structures only used by metadata
		 *
		 * @eb_refs should only be operated under private_lock, as it
		 * manages whether the subpage can be detached.
		 */
		atomic_t eb_refs;
		/* Structures only used by data */
		struct {
			atomic_t writers;

			/* Track pending ordered extents in this sector */
			u16 ordered_bitmap;
		};
	};
};
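/*
 * Illustrative sketch, an assumption rather than a declaration from this
 * header: with 64K pages and 4K sectors there are at most 64K / 4K = 16
 * sectors per page, which is why one bit per sector fits the u16 bitmaps
 * above. A range-to-bit helper in the .c file could look roughly like the
 * function below (name is hypothetical; assumes linux/mm.h for
 * offset_in_page()).
 */
static inline unsigned int btrfs_subpage_example_start_bit(
		const struct btrfs_fs_info *fs_info, u64 start)
{
	/* Offset of @start inside its page, expressed in sectors */
	return offset_in_page(start) >> fs_info->sectorsize_bits;
}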
enum btrfs_subpage_type {
	BTRFS_SUBPAGE_METADATA,
	BTRFS_SUBPAGE_DATA,
};

int btrfs_attach_subpage(const struct btrfs_fs_info *fs_info,
			 struct page *page, enum btrfs_subpage_type type);
void btrfs_detach_subpage(const struct btrfs_fs_info *fs_info,
			  struct page *page);

/* Allocate additional data where page represents more than one sector */
int btrfs_alloc_subpage(const struct btrfs_fs_info *fs_info,
			struct btrfs_subpage **ret,
			enum btrfs_subpage_type type);
void btrfs_free_subpage(struct btrfs_subpage *subpage);

void btrfs_page_inc_eb_refs(const struct btrfs_fs_info *fs_info,
			    struct page *page);
void btrfs_page_dec_eb_refs(const struct btrfs_fs_info *fs_info,
			    struct page *page);

void btrfs_subpage_start_reader(const struct btrfs_fs_info *fs_info,
				struct page *page, u64 start, u32 len);
void btrfs_subpage_end_reader(const struct btrfs_fs_info *fs_info,
			      struct page *page, u64 start, u32 len);

void btrfs_subpage_start_writer(const struct btrfs_fs_info *fs_info,
				struct page *page, u64 start, u32 len);
bool btrfs_subpage_end_and_test_writer(const struct btrfs_fs_info *fs_info,
				       struct page *page, u64 start, u32 len);
int btrfs_page_start_writer_lock(const struct btrfs_fs_info *fs_info,
				 struct page *page, u64 start, u32 len);
void btrfs_page_end_writer_lock(const struct btrfs_fs_info *fs_info,
				struct page *page, u64 start, u32 len);
/*
 * Template for subpage related operations.
 *
 * btrfs_subpage_*() are for call sites where the page has subpage attached
 * and the range is ensured to be inside the page.
 *
 * btrfs_page_*() are for call sites where the page can be either subpage
 * specific or a regular page. The function will handle both cases, but the
 * range still needs to be inside the page.
 *
 * btrfs_page_clamp_*() are similar to btrfs_page_*(), except the range doesn't
 * need to be inside the page. Those functions will truncate the range
 * automatically.
 */
#define DECLARE_BTRFS_SUBPAGE_OPS(name)					\
void btrfs_subpage_set_##name(const struct btrfs_fs_info *fs_info,	\
		struct page *page, u64 start, u32 len);			\
void btrfs_subpage_clear_##name(const struct btrfs_fs_info *fs_info,	\
		struct page *page, u64 start, u32 len);			\
bool btrfs_subpage_test_##name(const struct btrfs_fs_info *fs_info,	\
		struct page *page, u64 start, u32 len);			\
void btrfs_page_set_##name(const struct btrfs_fs_info *fs_info,		\
		struct page *page, u64 start, u32 len);			\
void btrfs_page_clear_##name(const struct btrfs_fs_info *fs_info,	\
		struct page *page, u64 start, u32 len);			\
bool btrfs_page_test_##name(const struct btrfs_fs_info *fs_info,	\
		struct page *page, u64 start, u32 len);			\
void btrfs_page_clamp_set_##name(const struct btrfs_fs_info *fs_info,	\
		struct page *page, u64 start, u32 len);			\
void btrfs_page_clamp_clear_##name(const struct btrfs_fs_info *fs_info, \
		struct page *page, u64 start, u32 len);			\
bool btrfs_page_clamp_test_##name(const struct btrfs_fs_info *fs_info,	\
		struct page *page, u64 start, u32 len);

DECLARE_BTRFS_SUBPAGE_OPS(uptodate);
DECLARE_BTRFS_SUBPAGE_OPS(error);
DECLARE_BTRFS_SUBPAGE_OPS(dirty);
DECLARE_BTRFS_SUBPAGE_OPS(writeback);
DECLARE_BTRFS_SUBPAGE_OPS(ordered);

bool btrfs_subpage_clear_and_test_dirty(const struct btrfs_fs_info *fs_info,
		struct page *page, u64 start, u32 len);

#endif
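For context, here is a minimal usage sketch of the helpers declared above, tied to the read-completion scenario in the commit message. The function name example_end_sector_read and the exact call sequence are assumptions for illustration only, not code from the kernel tree; only the declarations from this header (and fs_info->sectorsize) are taken as given.

static void example_end_sector_read(struct btrfs_fs_info *fs_info,
				    struct page *page, u64 start, u32 len,
				    bool uptodate)
{
	/* Clamp variants tolerate a range that extends past this page */
	if (uptodate)
		btrfs_page_clamp_set_uptodate(fs_info, page, start, len);
	else
		btrfs_page_clamp_set_error(fs_info, page, start, len);

	if (fs_info->sectorsize < PAGE_SIZE)
		/*
		 * Subpage case: dropping the reader count for this sector
		 * range is what eventually lets data unlock the page and,
		 * per the fix above, lets page::private be detached once no
		 * end_page_read() is pending.
		 */
		btrfs_subpage_end_reader(fs_info, page, start, len);
	else
		unlock_page(page);
}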