/* SPDX-License-Identifier: GPL-2.0 */

#ifndef BTRFS_BLOCK_GROUP_H
#define BTRFS_BLOCK_GROUP_H

#include "free-space-cache.h"

enum btrfs_disk_cache_state {
	BTRFS_DC_WRITTEN,
	BTRFS_DC_ERROR,
	BTRFS_DC_CLEAR,
	BTRFS_DC_SETUP,
};
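/*
 * Hedged illustration, not an existing kernel helper: a readable view of the
 * v1 free space cache states above.  The per-state descriptions are the
 * editor's paraphrase (an assumption), not text taken from this header.
 */
static inline const char *btrfs_disk_cache_state_name_sketch(int state)
{
	switch (state) {
	case BTRFS_DC_CLEAR:
		return "clear";		/* assumed: on-disk cache is stale and must be rewritten */
	case BTRFS_DC_SETUP:
		return "setup";		/* assumed: cache prepared, ready to be written out */
	case BTRFS_DC_WRITTEN:
		return "written";	/* assumed: on-disk cache written and usable */
	default:
		return "error";		/* BTRFS_DC_ERROR: writing the cache failed */
	}
}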

/*
 * This describes the state of the block_group for async discard. This is due
 * to the two-pass nature of it, where extent discarding is prioritized over
 * bitmap discarding. BTRFS_DISCARD_RESET_CURSOR is set when we are resetting
 * between lists to prevent contention for discard state variables
 * (e.g. discard_cursor).
 */
enum btrfs_discard_state {
	BTRFS_DISCARD_EXTENTS,
	BTRFS_DISCARD_BITMAPS,
	BTRFS_DISCARD_RESET_CURSOR,
};
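/*
 * Hedged sketch (not a helper that exists in the kernel): the two-pass
 * progression described in the comment above, written out as code.  The
 * function name is hypothetical; only the enum values come from this header.
 */
static inline enum btrfs_discard_state
btrfs_discard_next_pass_sketch(enum btrfs_discard_state state)
{
	switch (state) {
	case BTRFS_DISCARD_EXTENTS:
		/* Free extents are trimmed first, then we fall back to bitmaps. */
		return BTRFS_DISCARD_BITMAPS;
	case BTRFS_DISCARD_BITMAPS:
		/* Bitmaps done; reset the cursor before moving between lists. */
		return BTRFS_DISCARD_RESET_CURSOR;
	default:
		/* After the cursor reset, start over with extents. */
		return BTRFS_DISCARD_EXTENTS;
	}
}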

/*
 * Control flags for do_chunk_alloc()'s force field.
 *
 * CHUNK_ALLOC_NO_FORCE means to only allocate a chunk if we really need one.
 *
 * CHUNK_ALLOC_LIMITED means to only try and allocate one if we have very few
 * chunks already allocated. This is used as part of the clustering code to
 * help make sure we have a good pool of storage to cluster in, without filling
 * the FS with empty chunks.
 *
 * CHUNK_ALLOC_FORCE means it must try to allocate one.
 */
enum btrfs_chunk_alloc_enum {
	CHUNK_ALLOC_NO_FORCE,
	CHUNK_ALLOC_LIMITED,
	CHUNK_ALLOC_FORCE,
};
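/*
 * Illustrative usage (editorial sketch, not copied from a real call site):
 * these force levels are passed to btrfs_chunk_alloc(), declared below, e.g.
 *
 *	ret = btrfs_chunk_alloc(trans, btrfs_metadata_alloc_profile(fs_info),
 *				CHUNK_ALLOC_NO_FORCE);
 *
 * with CHUNK_ALLOC_FORCE reserved for callers that must get a new chunk
 * regardless of current utilization.
 */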

struct btrfs_caching_control {
	struct list_head list;
	struct mutex mutex;
	wait_queue_head_t wait;
	struct btrfs_work work;
	struct btrfs_block_group *block_group;
	u64 progress;
	refcount_t count;
};
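/*
 * Hedged note (based only on the declarations later in this header, not on a
 * specific caller): a caching control is reference counted via 'count', e.g.
 *
 *	caching_ctl = btrfs_get_caching_control(cache);
 *	if (caching_ctl) {
 *		wait_event(caching_ctl->wait, btrfs_block_group_done(cache));
 *		btrfs_put_caching_control(caching_ctl);
 *	}
 */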

/* Once caching_thread() finds this much free space, it will wake up waiters. */
#define CACHING_CTL_WAKE_UP SZ_2M

struct btrfs_block_group {
	struct btrfs_fs_info *fs_info;
	struct inode *inode;
	spinlock_t lock;
	u64 start;
	u64 length;
	u64 pinned;
	u64 reserved;
	u64 used;
	u64 delalloc_bytes;
	u64 bytes_super;
	u64 flags;
	u64 cache_generation;

	/*
	 * If the free space extent count exceeds this number, convert the block
	 * group to bitmaps.
	 */
	u32 bitmap_high_thresh;

	/*
	 * If the free space extent count drops below this number, convert the
	 * block group back to extents.
	 */
	u32 bitmap_low_thresh;

	/*
	 * Used only for delayed data space allocation, because only the data
	 * space allocation and the related metadata update can be done across
	 * transactions.
	 */
	struct rw_semaphore data_rwsem;

	/* For raid56, this is a full stripe, without parity */
	unsigned long full_stripe_len;

	unsigned int ro;
	unsigned int iref:1;
	unsigned int has_caching_ctl:1;
	unsigned int removed:1;
	unsigned int to_copy:1;
	unsigned int relocating_repair:1;

	int disk_cache_state;

	/* Cache tracking stuff */
	int cached;
	struct btrfs_caching_control *caching_ctl;
	u64 last_byte_to_unpin;

	struct btrfs_space_info *space_info;

	/* Free space cache stuff */
	struct btrfs_free_space_ctl *free_space_ctl;

	/* Block group cache stuff */
	struct rb_node cache_node;

	/* For block groups in the same raid type */
	struct list_head list;

	refcount_t refs;

	/*
	 * List of struct btrfs_free_clusters for this block group.
	 * Today it only ever holds one cluster, but that may change.
	 */
	struct list_head cluster_list;

	/* For delayed block group creation or deletion of empty block groups */
	struct list_head bg_list;

	/* For read-only block groups */
	struct list_head ro_list;

	/*
	 * When non-zero it means the block group's logical address and its
	 * device extents cannot be reused for future block group allocations
	 * until the counter goes down to 0. This is to prevent them from being
	 * reused while some task is still using the block group after it was
	 * deleted - we want to make sure they can only be reused for new block
	 * groups after that task is done with the deleted block group.
	 */
	atomic_t frozen;

	/* For discard operations */
	struct list_head discard_list;
	int discard_index;
	u64 discard_eligible_time;
	u64 discard_cursor;
	enum btrfs_discard_state discard_state;

	/* For dirty block groups */
	struct list_head dirty_list;
	struct list_head io_list;

	struct btrfs_io_ctl io_ctl;

	/*
	 * Incremented when doing extent allocations and holding a read lock
	 * on the space_info's groups_sem semaphore.
	 * Decremented when an ordered extent that represents an IO against this
	 * block group's range is created (after it's added to its inode's
	 * root's list of ordered extents) or immediately after the allocation
	 * if it's a metadata extent or fallocate extent (for these cases we
	 * don't create ordered extents).
	 */
	atomic_t reservations;

	/*
	 * Incremented while holding the spinlock *lock* by a task checking if
	 * it can perform a nocow write (incremented if the value for the *ro*
	 * field is 0). Decremented by such tasks once they create an ordered
	 * extent or before that if some error happens before reaching that step.
	 * This is to prevent races between block group relocation and nocow
	 * writes through direct IO.
	 */
	atomic_t nocow_writers;

	/* Lock for free space tree operations. */
	struct mutex free_space_lock;

	/*
	 * Does the block group need to be added to the free space tree?
	 * Protected by free_space_lock.
	 */
	int needs_free_space;

	/* Flag indicating this block group is placed on a sequential zone */
	bool seq_zone;
	/*
	 * Number of extents in this block group used for swap files.
	 * All accesses protected by the spinlock 'lock'.
	 */
	int swap_extents;

	/* Record locked full stripes for RAID5/6 block group */
	struct btrfs_full_stripe_locks_tree full_stripe_locks_root;

	/*
	 * Allocation offset for the block group to implement sequential
	 * allocation. This is used only on a zoned filesystem.
	 */
	u64 alloc_offset;
	u64 zone_unusable;
	u64 meta_write_pointer;
};
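/*
 * Hedged cross-reference (an editorial sketch, not lifted from a particular
 * call site): the 'reservations' and 'nocow_writers' counters above pair with
 * the wait helpers declared below; for example, before moving a block group's
 * extents one would wait for both to drain:
 *
 *	btrfs_wait_block_group_reservations(bg);
 *	btrfs_wait_nocow_writers(bg);
 */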

static inline u64 btrfs_block_group_end(struct btrfs_block_group *block_group)
{
	return (block_group->start + block_group->length);
}

static inline bool btrfs_is_block_group_data_only(
					struct btrfs_block_group *block_group)
{
	/*
	 * In mixed mode the fragmentation is expected to be high, lowering the
	 * efficiency, so only proper data block groups are considered.
	 */
	return (block_group->flags & BTRFS_BLOCK_GROUP_DATA) &&
	       !(block_group->flags & BTRFS_BLOCK_GROUP_METADATA);
}

#ifdef CONFIG_BTRFS_DEBUG
static inline int btrfs_should_fragment_free_space(
					struct btrfs_block_group *block_group)
{
	struct btrfs_fs_info *fs_info = block_group->fs_info;

	return (btrfs_test_opt(fs_info, FRAGMENT_METADATA) &&
		block_group->flags & BTRFS_BLOCK_GROUP_METADATA) ||
	       (btrfs_test_opt(fs_info, FRAGMENT_DATA) &&
		block_group->flags & BTRFS_BLOCK_GROUP_DATA);
}
#endif

struct btrfs_block_group *btrfs_lookup_first_block_group(
		struct btrfs_fs_info *info, u64 bytenr);
struct btrfs_block_group *btrfs_lookup_block_group(
		struct btrfs_fs_info *info, u64 bytenr);
struct btrfs_block_group *btrfs_next_block_group(
		struct btrfs_block_group *cache);
void btrfs_get_block_group(struct btrfs_block_group *cache);
void btrfs_put_block_group(struct btrfs_block_group *cache);
void btrfs_dec_block_group_reservations(struct btrfs_fs_info *fs_info,
					const u64 start);
void btrfs_wait_block_group_reservations(struct btrfs_block_group *bg);
bool btrfs_inc_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr);
void btrfs_dec_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr);
void btrfs_wait_nocow_writers(struct btrfs_block_group *bg);
void btrfs_wait_block_group_cache_progress(struct btrfs_block_group *cache,
					   u64 num_bytes);
int btrfs_wait_block_group_cache_done(struct btrfs_block_group *cache);
int btrfs_cache_block_group(struct btrfs_block_group *cache,
			    int load_cache_only);
void btrfs_put_caching_control(struct btrfs_caching_control *ctl);
struct btrfs_caching_control *btrfs_get_caching_control(
		struct btrfs_block_group *cache);
u64 add_new_free_space(struct btrfs_block_group *block_group,
		       u64 start, u64 end);
struct btrfs_trans_handle *btrfs_start_trans_remove_block_group(
				struct btrfs_fs_info *fs_info,
				const u64 chunk_offset);
int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
			     u64 group_start, struct extent_map *em);
void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info);
void btrfs_mark_bg_unused(struct btrfs_block_group *bg);
void btrfs_reclaim_bgs_work(struct work_struct *work);
void btrfs_reclaim_bgs(struct btrfs_fs_info *fs_info);
void btrfs_mark_bg_to_reclaim(struct btrfs_block_group *bg);
int btrfs_read_block_groups(struct btrfs_fs_info *info);
int btrfs_make_block_group(struct btrfs_trans_handle *trans, u64 bytes_used,
			   u64 type, u64 chunk_offset, u64 size);
void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans);
int btrfs_inc_block_group_ro(struct btrfs_block_group *cache,
			     bool do_chunk_alloc);
void btrfs_dec_block_group_ro(struct btrfs_block_group *cache);
int btrfs_start_dirty_block_groups(struct btrfs_trans_handle *trans);
int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans);
int btrfs_setup_space_cache(struct btrfs_trans_handle *trans);
int btrfs_update_block_group(struct btrfs_trans_handle *trans,
			     u64 bytenr, u64 num_bytes, int alloc);
int btrfs_add_reserved_bytes(struct btrfs_block_group *cache,
			     u64 ram_bytes, u64 num_bytes, int delalloc);
void btrfs_free_reserved_bytes(struct btrfs_block_group *cache,
			       u64 num_bytes, int delalloc);
int btrfs_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags,
		      enum btrfs_chunk_alloc_enum force);
int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans, u64 type);
void check_system_chunk(struct btrfs_trans_handle *trans, const u64 type);
u64 btrfs_get_alloc_profile(struct btrfs_fs_info *fs_info, u64 orig_flags);
void btrfs_put_block_group_cache(struct btrfs_fs_info *info);
int btrfs_free_block_groups(struct btrfs_fs_info *info);
void btrfs_wait_space_cache_v1_finished(struct btrfs_block_group *cache,
				struct btrfs_caching_control *caching_ctl);
int btrfs_rmap_block(struct btrfs_fs_info *fs_info, u64 chunk_start,
		     struct block_device *bdev, u64 physical, u64 **logical,
		     int *naddrs, int *stripe_len);
static inline u64 btrfs_data_alloc_profile(struct btrfs_fs_info *fs_info)
{
	return btrfs_get_alloc_profile(fs_info, BTRFS_BLOCK_GROUP_DATA);
}

static inline u64 btrfs_metadata_alloc_profile(struct btrfs_fs_info *fs_info)
{
	return btrfs_get_alloc_profile(fs_info, BTRFS_BLOCK_GROUP_METADATA);
}

static inline u64 btrfs_system_alloc_profile(struct btrfs_fs_info *fs_info)
{
	return btrfs_get_alloc_profile(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
}

static inline int btrfs_block_group_done(struct btrfs_block_group *cache)
{
	smp_mb();
	return cache->cached == BTRFS_CACHE_FINISHED ||
		cache->cached == BTRFS_CACHE_ERROR;
}
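/*
 * Hedged example (a hypothetical helper, not part of the kernel API): the
 * typical shape of waiting for a block group's free space caching to finish,
 * using only declarations from this header.
 */
static inline int btrfs_wait_for_caching_sketch(struct btrfs_block_group *cache)
{
	int ret = 0;

	if (!btrfs_block_group_done(cache)) {
		/* Kick off (or resume) full caching, then wait for it. */
		ret = btrfs_cache_block_group(cache, 0);
		if (!ret)
			ret = btrfs_wait_block_group_cache_done(cache);
	}
	return ret;
}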

void btrfs_freeze_block_group(struct btrfs_block_group *cache);
void btrfs_unfreeze_block_group(struct btrfs_block_group *cache);
bool btrfs_inc_block_group_swap_extents(struct btrfs_block_group *bg);
void btrfs_dec_block_group_swap_extents(struct btrfs_block_group *bg, int amount);

#endif /* BTRFS_BLOCK_GROUP_H */