2020-11-10 19:26:07 +08:00
|
|
|
// SPDX-License-Identifier: GPL-2.0
|
|
|
|
|
2021-02-04 18:21:48 +08:00
|
|
|
#include <linux/bitops.h>
|
2020-11-10 19:26:07 +08:00
|
|
|
#include <linux/slab.h>
|
|
|
|
#include <linux/blkdev.h>
|
2021-02-04 18:21:50 +08:00
|
|
|
#include <linux/sched/mm.h>
|
2021-08-19 20:19:15 +08:00
|
|
|
#include <linux/atomic.h>
|
2021-11-11 13:14:38 +08:00
|
|
|
#include <linux/vmalloc.h>
|
2020-11-10 19:26:07 +08:00
|
|
|
#include "ctree.h"
|
|
|
|
#include "volumes.h"
|
|
|
|
#include "zoned.h"
|
|
|
|
#include "rcu-string.h"
|
2021-02-04 18:21:48 +08:00
|
|
|
#include "disk-io.h"
|
2021-02-04 18:21:50 +08:00
|
|
|
#include "block-group.h"
|
2021-02-04 18:21:54 +08:00
|
|
|
#include "transaction.h"
|
2021-02-04 18:22:12 +08:00
|
|
|
#include "dev-replace.h"
|
2021-02-04 18:22:14 +08:00
|
|
|
#include "space-info.h"
|
2022-10-19 22:50:47 +08:00
|
|
|
#include "fs.h"
|
2022-10-19 22:51:00 +08:00
|
|
|
#include "accessors.h"
|
2023-01-21 14:50:18 +08:00
|
|
|
#include "bio.h"
|
2020-11-10 19:26:07 +08:00
|
|
|
|
|
|
|
/* Maximum number of zones to report per blkdev_report_zones() call */
|
|
|
|
#define BTRFS_REPORT_NR_ZONES 4096
|
2021-02-04 18:21:50 +08:00
|
|
|
/* Invalid allocation pointer value for missing devices */
|
|
|
|
#define WP_MISSING_DEV ((u64)-1)
|
|
|
|
/* Pseudo write pointer value for conventional zone */
|
|
|
|
#define WP_CONVENTIONAL ((u64)-2)
|
2020-11-10 19:26:07 +08:00
|
|
|
|
btrfs: zoned: move superblock logging zone location
Moves the location of the superblock logging zones. The new locations of
the logging zones are now determined based on fixed block addresses
instead of on fixed zone numbers.
The old placement method based on fixed zone numbers causes problems when
one needs to inspect a file system image without access to the drive zone
information. In such case, the super block locations cannot be reliably
determined as the zone size is unknown. By locating the superblock logging
zones using fixed addresses, we can scan a dumped file system image without
the zone information since a super block copy will always be present at or
after the fixed known locations.
Introduce the following three pairs of zones containing fixed offset
locations, regardless of the device zone size.
- primary superblock: offset 0B (and the following zone)
- first copy: offset 512G (and the following zone)
- Second copy: offset 4T (4096G, and the following zone)
If a logging zone is outside of the disk capacity, we do not record the
superblock copy.
The first copy position is much larger than for a non-zoned filesystem,
which is at 64M. This is to avoid overlapping with the log zones for
the primary superblock. This higher location is arbitrary but allows
supporting devices with very large zone sizes, plus some space around in
between.
Such large zone size is unrealistic and very unlikely to ever be seen in
real devices. Currently, SMR disks have a zone size of 256MB, and we are
expecting ZNS drives to be in the 1-4GB range, so this limit gives us
room to breathe. For now, we only allow zone sizes up to 8GB. The
maximum zone size that would still fit in the space is 256G.
The fixed location addresses are somewhat arbitrary, with the intent of
maintaining superblock reliability for smaller and larger devices, with
the preference for the latter. For this reason, there are two superblocks
under the first 1T. This should cover use cases for physical devices and
for emulated/device-mapper devices.
The superblock logging zones are reserved for superblock logging and
never used for data or metadata blocks. Note that we only reserve the
two zones per primary/copy actually used for superblock logging. We do
not reserve the ranges of zones possibly containing superblocks with the
largest supported zone size (0-16GB, 512G-528GB, 4096G-4112G).
The zones containing the fixed location offsets used to store
superblocks on a non-zoned volume are also reserved to avoid confusion.
Signed-off-by: Naohiro Aota <naohiro.aota@wdc.com>
Signed-off-by: David Sterba <dsterba@suse.com>
2021-04-08 16:25:28 +08:00
|
|
|
/*
|
|
|
|
* Location of the first zone of superblock logging zone pairs.
|
|
|
|
*
|
|
|
|
* - primary superblock: 0B (zone 0)
|
|
|
|
* - first copy: 512G (zone starting at that offset)
|
|
|
|
* - second copy: 4T (zone starting at that offset)
|
|
|
|
*/
|
|
|
|
#define BTRFS_SB_LOG_PRIMARY_OFFSET (0ULL)
|
|
|
|
#define BTRFS_SB_LOG_FIRST_OFFSET (512ULL * SZ_1G)
|
|
|
|
#define BTRFS_SB_LOG_SECOND_OFFSET (4096ULL * SZ_1G)
|
|
|
|
|
|
|
|
#define BTRFS_SB_LOG_FIRST_SHIFT const_ilog2(BTRFS_SB_LOG_FIRST_OFFSET)
|
|
|
|
#define BTRFS_SB_LOG_SECOND_SHIFT const_ilog2(BTRFS_SB_LOG_SECOND_OFFSET)
|
|
|
|
|
btrfs: implement log-structured superblock for ZONED mode
Superblock (and its copies) is the only data structure in btrfs which
has a fixed location on a device. Since we cannot overwrite in a
sequential write required zone, we cannot place superblock in the zone.
One easy solution is limiting superblock and copies to be placed only in
conventional zones. However, this method has two downsides: one is
reduced number of superblock copies. The location of the second copy of
superblock is 256GB, which is in a sequential write required zone on
typical devices in the market today. So, the number of superblock and
copies is limited to be two. Second downside is that we cannot support
devices which have no conventional zones at all.
To solve these two problems, we employ superblock log writing. It uses
two adjacent zones as a circular buffer to write updated superblocks.
Once the first zone is filled up, start writing into the second one.
Then, when both zones are filled up and before starting to write to the
first zone again, it reset the first zone.
We can determine the position of the latest superblock by reading write
pointer information from a device. One corner case is when both zones
are full. For this situation, we read out the last superblock of each
zone, and compare them to determine which zone is older.
The following zones are reserved as the circular buffer on ZONED btrfs.
- The primary superblock: zones 0 and 1
- The first copy: zones 16 and 17
- The second copy: zones 1024 or zone at 256GB which is minimum, and
next to it
If these reserved zones are conventional, superblock is written fixed at
the start of the zone without logging.
Signed-off-by: Naohiro Aota <naohiro.aota@wdc.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
2020-11-10 19:26:14 +08:00
|
|
|
/* Number of superblock log zones */
|
|
|
|
#define BTRFS_NR_SB_LOG_ZONES 2
|
|
|
|
|
2021-08-19 20:19:15 +08:00
|
|
|
/*
|
|
|
|
* Minimum of active zones we need:
|
|
|
|
*
|
|
|
|
* - BTRFS_SUPER_MIRROR_MAX zones for superblock mirrors
|
|
|
|
* - 3 zones to ensure at least one zone per SYSTEM, META and DATA block group
|
|
|
|
* - 1 zone for tree-log dedicated block group
|
|
|
|
* - 1 zone for relocation
|
|
|
|
*/
|
|
|
|
#define BTRFS_MIN_ACTIVE_ZONES (BTRFS_SUPER_MIRROR_MAX + 5)
|
|
|
|
|
btrfs: zoned: move superblock logging zone location
Moves the location of the superblock logging zones. The new locations of
the logging zones are now determined based on fixed block addresses
instead of on fixed zone numbers.
The old placement method based on fixed zone numbers causes problems when
one needs to inspect a file system image without access to the drive zone
information. In such case, the super block locations cannot be reliably
determined as the zone size is unknown. By locating the superblock logging
zones using fixed addresses, we can scan a dumped file system image without
the zone information since a super block copy will always be present at or
after the fixed known locations.
Introduce the following three pairs of zones containing fixed offset
locations, regardless of the device zone size.
- primary superblock: offset 0B (and the following zone)
- first copy: offset 512G (and the following zone)
- Second copy: offset 4T (4096G, and the following zone)
If a logging zone is outside of the disk capacity, we do not record the
superblock copy.
The first copy position is much larger than for a non-zoned filesystem,
which is at 64M. This is to avoid overlapping with the log zones for
the primary superblock. This higher location is arbitrary but allows
supporting devices with very large zone sizes, plus some space around in
between.
Such large zone size is unrealistic and very unlikely to ever be seen in
real devices. Currently, SMR disks have a zone size of 256MB, and we are
expecting ZNS drives to be in the 1-4GB range, so this limit gives us
room to breathe. For now, we only allow zone sizes up to 8GB. The
maximum zone size that would still fit in the space is 256G.
The fixed location addresses are somewhat arbitrary, with the intent of
maintaining superblock reliability for smaller and larger devices, with
the preference for the latter. For this reason, there are two superblocks
under the first 1T. This should cover use cases for physical devices and
for emulated/device-mapper devices.
The superblock logging zones are reserved for superblock logging and
never used for data or metadata blocks. Note that we only reserve the
two zones per primary/copy actually used for superblock logging. We do
not reserve the ranges of zones possibly containing superblocks with the
largest supported zone size (0-16GB, 512G-528GB, 4096G-4112G).
The zones containing the fixed location offsets used to store
superblocks on a non-zoned volume are also reserved to avoid confusion.
Signed-off-by: Naohiro Aota <naohiro.aota@wdc.com>
Signed-off-by: David Sterba <dsterba@suse.com>
2021-04-08 16:25:28 +08:00
|
|
|
/*
|
2022-05-13 23:52:52 +08:00
|
|
|
* Minimum / maximum supported zone size. Currently, SMR disks have a zone
|
|
|
|
* size of 256MiB, and we are expecting ZNS drives to be in the 1-4GiB range.
|
|
|
|
* We do not expect the zone size to become larger than 8GiB or smaller than
|
|
|
|
* 4MiB in the near future.
|
btrfs: zoned: move superblock logging zone location
Moves the location of the superblock logging zones. The new locations of
the logging zones are now determined based on fixed block addresses
instead of on fixed zone numbers.
The old placement method based on fixed zone numbers causes problems when
one needs to inspect a file system image without access to the drive zone
information. In such case, the super block locations cannot be reliably
determined as the zone size is unknown. By locating the superblock logging
zones using fixed addresses, we can scan a dumped file system image without
the zone information since a super block copy will always be present at or
after the fixed known locations.
Introduce the following three pairs of zones containing fixed offset
locations, regardless of the device zone size.
- primary superblock: offset 0B (and the following zone)
- first copy: offset 512G (and the following zone)
- Second copy: offset 4T (4096G, and the following zone)
If a logging zone is outside of the disk capacity, we do not record the
superblock copy.
The first copy position is much larger than for a non-zoned filesystem,
which is at 64M. This is to avoid overlapping with the log zones for
the primary superblock. This higher location is arbitrary but allows
supporting devices with very large zone sizes, plus some space around in
between.
Such large zone size is unrealistic and very unlikely to ever be seen in
real devices. Currently, SMR disks have a zone size of 256MB, and we are
expecting ZNS drives to be in the 1-4GB range, so this limit gives us
room to breathe. For now, we only allow zone sizes up to 8GB. The
maximum zone size that would still fit in the space is 256G.
The fixed location addresses are somewhat arbitrary, with the intent of
maintaining superblock reliability for smaller and larger devices, with
the preference for the latter. For this reason, there are two superblocks
under the first 1T. This should cover use cases for physical devices and
for emulated/device-mapper devices.
The superblock logging zones are reserved for superblock logging and
never used for data or metadata blocks. Note that we only reserve the
two zones per primary/copy actually used for superblock logging. We do
not reserve the ranges of zones possibly containing superblocks with the
largest supported zone size (0-16GB, 512G-528GB, 4096G-4112G).
The zones containing the fixed location offsets used to store
superblocks on a non-zoned volume are also reserved to avoid confusion.
Signed-off-by: Naohiro Aota <naohiro.aota@wdc.com>
Signed-off-by: David Sterba <dsterba@suse.com>
2021-04-08 16:25:28 +08:00
|
|
|
*/
|
|
|
|
#define BTRFS_MAX_ZONE_SIZE SZ_8G
|
2022-05-13 23:52:52 +08:00
|
|
|
#define BTRFS_MIN_ZONE_SIZE SZ_4M
|
btrfs: zoned: move superblock logging zone location
Moves the location of the superblock logging zones. The new locations of
the logging zones are now determined based on fixed block addresses
instead of on fixed zone numbers.
The old placement method based on fixed zone numbers causes problems when
one needs to inspect a file system image without access to the drive zone
information. In such case, the super block locations cannot be reliably
determined as the zone size is unknown. By locating the superblock logging
zones using fixed addresses, we can scan a dumped file system image without
the zone information since a super block copy will always be present at or
after the fixed known locations.
Introduce the following three pairs of zones containing fixed offset
locations, regardless of the device zone size.
- primary superblock: offset 0B (and the following zone)
- first copy: offset 512G (and the following zone)
- Second copy: offset 4T (4096G, and the following zone)
If a logging zone is outside of the disk capacity, we do not record the
superblock copy.
The first copy position is much larger than for a non-zoned filesystem,
which is at 64M. This is to avoid overlapping with the log zones for
the primary superblock. This higher location is arbitrary but allows
supporting devices with very large zone sizes, plus some space around in
between.
Such large zone size is unrealistic and very unlikely to ever be seen in
real devices. Currently, SMR disks have a zone size of 256MB, and we are
expecting ZNS drives to be in the 1-4GB range, so this limit gives us
room to breathe. For now, we only allow zone sizes up to 8GB. The
maximum zone size that would still fit in the space is 256G.
The fixed location addresses are somewhat arbitrary, with the intent of
maintaining superblock reliability for smaller and larger devices, with
the preference for the latter. For this reason, there are two superblocks
under the first 1T. This should cover use cases for physical devices and
for emulated/device-mapper devices.
The superblock logging zones are reserved for superblock logging and
never used for data or metadata blocks. Note that we only reserve the
two zones per primary/copy actually used for superblock logging. We do
not reserve the ranges of zones possibly containing superblocks with the
largest supported zone size (0-16GB, 512G-528GB, 4096G-4112G).
The zones containing the fixed location offsets used to store
superblocks on a non-zoned volume are also reserved to avoid confusion.
Signed-off-by: Naohiro Aota <naohiro.aota@wdc.com>
Signed-off-by: David Sterba <dsterba@suse.com>
2021-04-08 16:25:28 +08:00
|
|
|
|
2021-08-19 20:19:12 +08:00
|
|
|
#define SUPER_INFO_SECTORS ((u64)BTRFS_SUPER_INFO_SIZE >> SECTOR_SHIFT)
|
|
|
|
|
|
|
|
static inline bool sb_zone_is_full(const struct blk_zone *zone)
|
|
|
|
{
|
|
|
|
return (zone->cond == BLK_ZONE_COND_FULL) ||
|
|
|
|
(zone->wp + SUPER_INFO_SECTORS > zone->start + zone->capacity);
|
|
|
|
}
|
|
|
|
|
2020-11-10 19:26:07 +08:00
|
|
|
static int copy_zone_info_cb(struct blk_zone *zone, unsigned int idx, void *data)
|
|
|
|
{
|
|
|
|
struct blk_zone *zones = data;
|
|
|
|
|
|
|
|
memcpy(&zones[idx], zone, sizeof(*zone));
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
btrfs: implement log-structured superblock for ZONED mode
Superblock (and its copies) is the only data structure in btrfs which
has a fixed location on a device. Since we cannot overwrite in a
sequential write required zone, we cannot place superblock in the zone.
One easy solution is limiting superblock and copies to be placed only in
conventional zones. However, this method has two downsides: one is
reduced number of superblock copies. The location of the second copy of
superblock is 256GB, which is in a sequential write required zone on
typical devices in the market today. So, the number of superblock and
copies is limited to be two. Second downside is that we cannot support
devices which have no conventional zones at all.
To solve these two problems, we employ superblock log writing. It uses
two adjacent zones as a circular buffer to write updated superblocks.
Once the first zone is filled up, start writing into the second one.
Then, when both zones are filled up and before starting to write to the
first zone again, it reset the first zone.
We can determine the position of the latest superblock by reading write
pointer information from a device. One corner case is when both zones
are full. For this situation, we read out the last superblock of each
zone, and compare them to determine which zone is older.
The following zones are reserved as the circular buffer on ZONED btrfs.
- The primary superblock: zones 0 and 1
- The first copy: zones 16 and 17
- The second copy: zones 1024 or zone at 256GB which is minimum, and
next to it
If these reserved zones are conventional, superblock is written fixed at
the start of the zone without logging.
Signed-off-by: Naohiro Aota <naohiro.aota@wdc.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
2020-11-10 19:26:14 +08:00
|
|
|
/*
 * Determine the superblock log write position within a pair of log zones.
 *
 * @bdev:   block device the zones belong to (used to read back superblocks
 *          through the page cache when both zones are full)
 * @zones:  array of BTRFS_NR_SB_LOG_ZONES sequential zones forming the
 *          circular superblock log (assumed adjacent on disk — the two
 *          zones back each other up; TODO confirm adjacency is guaranteed
 *          by the caller)
 * @wp_ret: output, byte offset of the next superblock write position
 *
 * Returns 0 on success, -ENOENT when both zones are empty (no superblock
 * has ever been written), -EUCLEAN on an inconsistent zone state, or a
 * negative errno from reading a cached superblock page.
 */
static int sb_write_pointer(struct block_device *bdev, struct blk_zone *zones,
			    u64 *wp_ret)
{
	bool empty[BTRFS_NR_SB_LOG_ZONES];
	bool full[BTRFS_NR_SB_LOG_ZONES];
	sector_t sector;
	int i;

	/* Classify each log zone; conventional zones are never logged to. */
	for (i = 0; i < BTRFS_NR_SB_LOG_ZONES; i++) {
		ASSERT(zones[i].type != BLK_ZONE_TYPE_CONVENTIONAL);
		empty[i] = (zones[i].cond == BLK_ZONE_COND_EMPTY);
		full[i] = sb_zone_is_full(&zones[i]);
	}

	/*
	 * Possible states of log buffer zones
	 *
	 *           Empty[0]  In use[0]  Full[0]
	 * Empty[1]         *          0        1
	 * In use[1]        x          x        1
	 * Full[1]          0          0        C
	 *
	 * Log position:
	 *   *: Special case, no superblock is written
	 *   0: Use write pointer of zones[0]
	 *   1: Use write pointer of zones[1]
	 *   C: Compare super blocks from zones[0] and zones[1], use the latest
	 *      one determined by generation
	 *   x: Invalid state
	 */

	if (empty[0] && empty[1]) {
		/* Special case to distinguish no superblock to read */
		*wp_ret = zones[0].start << SECTOR_SHIFT;
		return -ENOENT;
	} else if (full[0] && full[1]) {
		/*
		 * Compare two super blocks: read the last superblock stored
		 * in each zone and pick the newer one by generation.
		 */
		struct address_space *mapping = bdev->bd_inode->i_mapping;
		struct page *page[BTRFS_NR_SB_LOG_ZONES];
		struct btrfs_super_block *super[BTRFS_NR_SB_LOG_ZONES];
		int i;

		for (i = 0; i < BTRFS_NR_SB_LOG_ZONES; i++) {
			u64 bytenr;

			/* Byte offset of the last superblock slot in zone i. */
			bytenr = ((zones[i].start + zones[i].len)
				   << SECTOR_SHIFT) - BTRFS_SUPER_INFO_SIZE;

			page[i] = read_cache_page_gfp(mapping,
					bytenr >> PAGE_SHIFT, GFP_NOFS);
			if (IS_ERR(page[i])) {
				/*
				 * Drop the reference taken for the first
				 * superblock page before bailing out.
				 */
				if (i == 1)
					btrfs_release_disk_super(super[0]);
				return PTR_ERR(page[i]);
			}
			super[i] = page_address(page[i]);
		}

		/*
		 * Point the write position at the start of the OLDER zone:
		 * the newest superblock then sits immediately before it
		 * (zones are adjacent — TODO confirm), and the older zone is
		 * the one to be reset and rewritten next.
		 */
		if (btrfs_super_generation(super[0]) >
		    btrfs_super_generation(super[1]))
			sector = zones[1].start;
		else
			sector = zones[0].start;

		for (i = 0; i < BTRFS_NR_SB_LOG_ZONES; i++)
			btrfs_release_disk_super(super[i]);
	} else if (!full[0] && (empty[1] || full[1])) {
		/* Zone 0 still has room: continue logging there. */
		sector = zones[0].wp;
	} else if (full[0]) {
		/* Zone 0 exhausted: log into zone 1. */
		sector = zones[1].wp;
	} else {
		/* Invalid state per the table above ('x' cells). */
		return -EUCLEAN;
	}

	*wp_ret = sector << SECTOR_SHIFT;
	return 0;
}
|
|
|
|
|
|
|
|
/*
|
btrfs: zoned: move superblock logging zone location
Moves the location of the superblock logging zones. The new locations of
the logging zones are now determined based on fixed block addresses
instead of on fixed zone numbers.
The old placement method based on fixed zone numbers causes problems when
one needs to inspect a file system image without access to the drive zone
information. In such case, the super block locations cannot be reliably
determined as the zone size is unknown. By locating the superblock logging
zones using fixed addresses, we can scan a dumped file system image without
the zone information since a super block copy will always be present at or
after the fixed known locations.
Introduce the following three pairs of zones containing fixed offset
locations, regardless of the device zone size.
- primary superblock: offset 0B (and the following zone)
- first copy: offset 512G (and the following zone)
- Second copy: offset 4T (4096G, and the following zone)
If a logging zone is outside of the disk capacity, we do not record the
superblock copy.
The first copy position is much larger than for a non-zoned filesystem,
which is at 64M. This is to avoid overlapping with the log zones for
the primary superblock. This higher location is arbitrary but allows
supporting devices with very large zone sizes, plus some space around in
between.
Such large zone size is unrealistic and very unlikely to ever be seen in
real devices. Currently, SMR disks have a zone size of 256MB, and we are
expecting ZNS drives to be in the 1-4GB range, so this limit gives us
room to breathe. For now, we only allow zone sizes up to 8GB. The
maximum zone size that would still fit in the space is 256G.
The fixed location addresses are somewhat arbitrary, with the intent of
maintaining superblock reliability for smaller and larger devices, with
the preference for the latter. For this reason, there are two superblocks
under the first 1T. This should cover use cases for physical devices and
for emulated/device-mapper devices.
The superblock logging zones are reserved for superblock logging and
never used for data or metadata blocks. Note that we only reserve the
two zones per primary/copy actually used for superblock logging. We do
not reserve the ranges of zones possibly containing superblocks with the
largest supported zone size (0-16GB, 512G-528GB, 4096G-4112G).
The zones containing the fixed location offsets used to store
superblocks on a non-zoned volume are also reserved to avoid confusion.
Signed-off-by: Naohiro Aota <naohiro.aota@wdc.com>
Signed-off-by: David Sterba <dsterba@suse.com>
2021-04-08 16:25:28 +08:00
|
|
|
* Get the first zone number of the superblock mirror
|
btrfs: implement log-structured superblock for ZONED mode
Superblock (and its copies) is the only data structure in btrfs which
has a fixed location on a device. Since we cannot overwrite in a
sequential write required zone, we cannot place superblock in the zone.
One easy solution is limiting superblock and copies to be placed only in
conventional zones. However, this method has two downsides: one is
reduced number of superblock copies. The location of the second copy of
superblock is 256GB, which is in a sequential write required zone on
typical devices in the market today. So, the number of superblock and
copies is limited to be two. Second downside is that we cannot support
devices which have no conventional zones at all.
To solve these two problems, we employ superblock log writing. It uses
two adjacent zones as a circular buffer to write updated superblocks.
Once the first zone is filled up, start writing into the second one.
Then, when both zones are filled up and before starting to write to the
first zone again, it reset the first zone.
We can determine the position of the latest superblock by reading write
pointer information from a device. One corner case is when both zones
are full. For this situation, we read out the last superblock of each
zone, and compare them to determine which zone is older.
The following zones are reserved as the circular buffer on ZONED btrfs.
- The primary superblock: zones 0 and 1
- The first copy: zones 16 and 17
- The second copy: zones 1024 or zone at 256GB which is minimum, and
next to it
If these reserved zones are conventional, superblock is written fixed at
the start of the zone without logging.
Signed-off-by: Naohiro Aota <naohiro.aota@wdc.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
2020-11-10 19:26:14 +08:00
|
|
|
*/
|
|
|
|
static inline u32 sb_zone_number(int shift, int mirror)
|
|
|
|
{
|
2022-12-17 04:15:57 +08:00
|
|
|
u64 zone = U64_MAX;
|
btrfs: implement log-structured superblock for ZONED mode
Superblock (and its copies) is the only data structure in btrfs which
has a fixed location on a device. Since we cannot overwrite in a
sequential write required zone, we cannot place superblock in the zone.
One easy solution is limiting superblock and copies to be placed only in
conventional zones. However, this method has two downsides: one is
reduced number of superblock copies. The location of the second copy of
superblock is 256GB, which is in a sequential write required zone on
typical devices in the market today. So, the number of superblock and
copies is limited to be two. Second downside is that we cannot support
devices which have no conventional zones at all.
To solve these two problems, we employ superblock log writing. It uses
two adjacent zones as a circular buffer to write updated superblocks.
Once the first zone is filled up, start writing into the second one.
Then, when both zones are filled up and before starting to write to the
first zone again, it reset the first zone.
We can determine the position of the latest superblock by reading write
pointer information from a device. One corner case is when both zones
are full. For this situation, we read out the last superblock of each
zone, and compare them to determine which zone is older.
The following zones are reserved as the circular buffer on ZONED btrfs.
- The primary superblock: zones 0 and 1
- The first copy: zones 16 and 17
- The second copy: zones 1024 or zone at 256GB which is minimum, and
next to it
If these reserved zones are conventional, superblock is written fixed at
the start of the zone without logging.
Signed-off-by: Naohiro Aota <naohiro.aota@wdc.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
2020-11-10 19:26:14 +08:00
|
|
|
|
btrfs: zoned: move superblock logging zone location
Moves the location of the superblock logging zones. The new locations of
the logging zones are now determined based on fixed block addresses
instead of on fixed zone numbers.
The old placement method based on fixed zone numbers causes problems when
one needs to inspect a file system image without access to the drive zone
information. In such case, the super block locations cannot be reliably
determined as the zone size is unknown. By locating the superblock logging
zones using fixed addresses, we can scan a dumped file system image without
the zone information since a super block copy will always be present at or
after the fixed known locations.
Introduce the following three pairs of zones containing fixed offset
locations, regardless of the device zone size.
- primary superblock: offset 0B (and the following zone)
- first copy: offset 512G (and the following zone)
- Second copy: offset 4T (4096G, and the following zone)
If a logging zone is outside of the disk capacity, we do not record the
superblock copy.
The first copy position is much larger than for a non-zoned filesystem,
which is at 64M. This is to avoid overlapping with the log zones for
the primary superblock. This higher location is arbitrary but allows
supporting devices with very large zone sizes, plus some space around in
between.
Such large zone size is unrealistic and very unlikely to ever be seen in
real devices. Currently, SMR disks have a zone size of 256MB, and we are
expecting ZNS drives to be in the 1-4GB range, so this limit gives us
room to breathe. For now, we only allow zone sizes up to 8GB. The
maximum zone size that would still fit in the space is 256G.
The fixed location addresses are somewhat arbitrary, with the intent of
maintaining superblock reliability for smaller and larger devices, with
the preference for the latter. For this reason, there are two superblocks
under the first 1T. This should cover use cases for physical devices and
for emulated/device-mapper devices.
The superblock logging zones are reserved for superblock logging and
never used for data or metadata blocks. Note that we only reserve the
two zones per primary/copy actually used for superblock logging. We do
not reserve the ranges of zones possibly containing superblocks with the
largest supported zone size (0-16GB, 512G-528GB, 4096G-4112G).
The zones containing the fixed location offsets used to store
superblocks on a non-zoned volume are also reserved to avoid confusion.
Signed-off-by: Naohiro Aota <naohiro.aota@wdc.com>
Signed-off-by: David Sterba <dsterba@suse.com>
2021-04-08 16:25:28 +08:00
|
|
|
ASSERT(mirror < BTRFS_SUPER_MIRROR_MAX);
|
btrfs: implement log-structured superblock for ZONED mode
Superblock (and its copies) is the only data structure in btrfs which
has a fixed location on a device. Since we cannot overwrite in a
sequential write required zone, we cannot place superblock in the zone.
One easy solution is limiting superblock and copies to be placed only in
conventional zones. However, this method has two downsides: one is
reduced number of superblock copies. The location of the second copy of
superblock is 256GB, which is in a sequential write required zone on
typical devices in the market today. So, the number of superblock and
copies is limited to be two. Second downside is that we cannot support
devices which have no conventional zones at all.
To solve these two problems, we employ superblock log writing. It uses
two adjacent zones as a circular buffer to write updated superblocks.
Once the first zone is filled up, start writing into the second one.
Then, when both zones are filled up and before starting to write to the
first zone again, it reset the first zone.
We can determine the position of the latest superblock by reading write
pointer information from a device. One corner case is when both zones
are full. For this situation, we read out the last superblock of each
zone, and compare them to determine which zone is older.
The following zones are reserved as the circular buffer on ZONED btrfs.
- The primary superblock: zones 0 and 1
- The first copy: zones 16 and 17
- The second copy: zones 1024 or zone at 256GB which is minimum, and
next to it
If these reserved zones are conventional, superblock is written fixed at
the start of the zone without logging.
Signed-off-by: Naohiro Aota <naohiro.aota@wdc.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
2020-11-10 19:26:14 +08:00
|
|
|
switch (mirror) {
|
btrfs: zoned: move superblock logging zone location
Moves the location of the superblock logging zones. The new locations of
the logging zones are now determined based on fixed block addresses
instead of on fixed zone numbers.
The old placement method based on fixed zone numbers causes problems when
one needs to inspect a file system image without access to the drive zone
information. In such case, the super block locations cannot be reliably
determined as the zone size is unknown. By locating the superblock logging
zones using fixed addresses, we can scan a dumped file system image without
the zone information since a super block copy will always be present at or
after the fixed known locations.
Introduce the following three pairs of zones containing fixed offset
locations, regardless of the device zone size.
- primary superblock: offset 0B (and the following zone)
- first copy: offset 512G (and the following zone)
- Second copy: offset 4T (4096G, and the following zone)
If a logging zone is outside of the disk capacity, we do not record the
superblock copy.
The first copy position is much larger than for a non-zoned filesystem,
which is at 64M. This is to avoid overlapping with the log zones for
the primary superblock. This higher location is arbitrary but allows
supporting devices with very large zone sizes, plus some space around in
between.
Such large zone size is unrealistic and very unlikely to ever be seen in
real devices. Currently, SMR disks have a zone size of 256MB, and we are
expecting ZNS drives to be in the 1-4GB range, so this limit gives us
room to breathe. For now, we only allow zone sizes up to 8GB. The
maximum zone size that would still fit in the space is 256G.
The fixed location addresses are somewhat arbitrary, with the intent of
maintaining superblock reliability for smaller and larger devices, with
the preference for the latter. For this reason, there are two superblocks
under the first 1T. This should cover use cases for physical devices and
for emulated/device-mapper devices.
The superblock logging zones are reserved for superblock logging and
never used for data or metadata blocks. Note that we only reserve the
two zones per primary/copy actually used for superblock logging. We do
not reserve the ranges of zones possibly containing superblocks with the
largest supported zone size (0-16GB, 512G-528GB, 4096G-4112G).
The zones containing the fixed location offsets used to store
superblocks on a non-zoned volume are also reserved to avoid confusion.
Signed-off-by: Naohiro Aota <naohiro.aota@wdc.com>
Signed-off-by: David Sterba <dsterba@suse.com>
2021-04-08 16:25:28 +08:00
|
|
|
case 0: zone = 0; break;
|
|
|
|
case 1: zone = 1ULL << (BTRFS_SB_LOG_FIRST_SHIFT - shift); break;
|
|
|
|
case 2: zone = 1ULL << (BTRFS_SB_LOG_SECOND_SHIFT - shift); break;
|
btrfs: implement log-structured superblock for ZONED mode
Superblock (and its copies) is the only data structure in btrfs which
has a fixed location on a device. Since we cannot overwrite in a
sequential write required zone, we cannot place superblock in the zone.
One easy solution is limiting superblock and copies to be placed only in
conventional zones. However, this method has two downsides: one is
reduced number of superblock copies. The location of the second copy of
superblock is 256GB, which is in a sequential write required zone on
typical devices in the market today. So, the number of superblock and
copies is limited to be two. Second downside is that we cannot support
devices which have no conventional zones at all.
To solve these two problems, we employ superblock log writing. It uses
two adjacent zones as a circular buffer to write updated superblocks.
Once the first zone is filled up, start writing into the second one.
Then, when both zones are filled up and before starting to write to the
first zone again, it reset the first zone.
We can determine the position of the latest superblock by reading write
pointer information from a device. One corner case is when both zones
are full. For this situation, we read out the last superblock of each
zone, and compare them to determine which zone is older.
The following zones are reserved as the circular buffer on ZONED btrfs.
- The primary superblock: zones 0 and 1
- The first copy: zones 16 and 17
- The second copy: zones 1024 or zone at 256GB which is minimum, and
next to it
If these reserved zones are conventional, superblock is written fixed at
the start of the zone without logging.
Signed-off-by: Naohiro Aota <naohiro.aota@wdc.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
2020-11-10 19:26:14 +08:00
|
|
|
}
|
|
|
|
|
btrfs: zoned: move superblock logging zone location
Moves the location of the superblock logging zones. The new locations of
the logging zones are now determined based on fixed block addresses
instead of on fixed zone numbers.
The old placement method based on fixed zone numbers causes problems when
one needs to inspect a file system image without access to the drive zone
information. In such case, the super block locations cannot be reliably
determined as the zone size is unknown. By locating the superblock logging
zones using fixed addresses, we can scan a dumped file system image without
the zone information since a super block copy will always be present at or
after the fixed known locations.
Introduce the following three pairs of zones containing fixed offset
locations, regardless of the device zone size.
- primary superblock: offset 0B (and the following zone)
- first copy: offset 512G (and the following zone)
- Second copy: offset 4T (4096G, and the following zone)
If a logging zone is outside of the disk capacity, we do not record the
superblock copy.
The first copy position is much larger than for a non-zoned filesystem,
which is at 64M. This is to avoid overlapping with the log zones for
the primary superblock. This higher location is arbitrary but allows
supporting devices with very large zone sizes, plus some space around in
between.
Such large zone size is unrealistic and very unlikely to ever be seen in
real devices. Currently, SMR disks have a zone size of 256MB, and we are
expecting ZNS drives to be in the 1-4GB range, so this limit gives us
room to breathe. For now, we only allow zone sizes up to 8GB. The
maximum zone size that would still fit in the space is 256G.
The fixed location addresses are somewhat arbitrary, with the intent of
maintaining superblock reliability for smaller and larger devices, with
the preference for the latter. For this reason, there are two superblocks
under the first 1T. This should cover use cases for physical devices and
for emulated/device-mapper devices.
The superblock logging zones are reserved for superblock logging and
never used for data or metadata blocks. Note that we only reserve the
two zones per primary/copy actually used for superblock logging. We do
not reserve the ranges of zones possibly containing superblocks with the
largest supported zone size (0-16GB, 512G-528GB, 4096G-4112G).
The zones containing the fixed location offsets used to store
superblocks on a non-zoned volume are also reserved to avoid confusion.
Signed-off-by: Naohiro Aota <naohiro.aota@wdc.com>
Signed-off-by: David Sterba <dsterba@suse.com>
2021-04-08 16:25:28 +08:00
|
|
|
ASSERT(zone <= U32_MAX);
|
|
|
|
|
|
|
|
return (u32)zone;
|
btrfs: implement log-structured superblock for ZONED mode
Superblock (and its copies) is the only data structure in btrfs which
has a fixed location on a device. Since we cannot overwrite in a
sequential write required zone, we cannot place superblock in the zone.
One easy solution is limiting superblock and copies to be placed only in
conventional zones. However, this method has two downsides: one is
reduced number of superblock copies. The location of the second copy of
superblock is 256GB, which is in a sequential write required zone on
typical devices in the market today. So, the number of superblock and
copies is limited to be two. Second downside is that we cannot support
devices which have no conventional zones at all.
To solve these two problems, we employ superblock log writing. It uses
two adjacent zones as a circular buffer to write updated superblocks.
Once the first zone is filled up, start writing into the second one.
Then, when both zones are filled up and before starting to write to the
first zone again, it reset the first zone.
We can determine the position of the latest superblock by reading write
pointer information from a device. One corner case is when both zones
are full. For this situation, we read out the last superblock of each
zone, and compare them to determine which zone is older.
The following zones are reserved as the circular buffer on ZONED btrfs.
- The primary superblock: zones 0 and 1
- The first copy: zones 16 and 17
- The second copy: zones 1024 or zone at 256GB which is minimum, and
next to it
If these reserved zones are conventional, superblock is written fixed at
the start of the zone without logging.
Signed-off-by: Naohiro Aota <naohiro.aota@wdc.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
2020-11-10 19:26:14 +08:00
|
|
|
}
|
|
|
|
|
2021-05-27 14:27:32 +08:00
|
|
|
/* Return the starting sector of zone @zone_number on @bdev. */
static inline sector_t zone_start_sector(u32 zone_number,
					 struct block_device *bdev)
{
	const int zone_shift = ilog2(bdev_zone_sectors(bdev));

	return (sector_t)zone_number << zone_shift;
}
|
|
|
|
|
|
|
|
/* Return the starting byte offset of zone @zone_number on the device. */
static inline u64 zone_start_physical(u32 zone_number,
				      struct btrfs_zoned_device_info *zone_info)
{
	const u64 zno = zone_number;

	return zno << zone_info->zone_size_shift;
}
|
|
|
|
|
2021-02-04 18:21:47 +08:00
|
|
|
/*
|
|
|
|
* Emulate blkdev_report_zones() for a non-zoned device. It slices up the block
|
|
|
|
* device into static sized chunks and fake a conventional zone on each of
|
|
|
|
* them.
|
|
|
|
*/
|
|
|
|
static int emulate_report_zones(struct btrfs_device *device, u64 pos,
|
|
|
|
struct blk_zone *zones, unsigned int nr_zones)
|
|
|
|
{
|
|
|
|
const sector_t zone_sectors = device->fs_info->zone_size >> SECTOR_SHIFT;
|
|
|
|
sector_t bdev_size = bdev_nr_sectors(device->bdev);
|
|
|
|
unsigned int i;
|
|
|
|
|
|
|
|
pos >>= SECTOR_SHIFT;
|
|
|
|
for (i = 0; i < nr_zones; i++) {
|
|
|
|
zones[i].start = i * zone_sectors + pos;
|
|
|
|
zones[i].len = zone_sectors;
|
|
|
|
zones[i].capacity = zone_sectors;
|
|
|
|
zones[i].wp = zones[i].start + zone_sectors;
|
|
|
|
zones[i].type = BLK_ZONE_TYPE_CONVENTIONAL;
|
|
|
|
zones[i].cond = BLK_ZONE_COND_NOT_WP;
|
|
|
|
|
|
|
|
if (zones[i].wp >= bdev_size) {
|
|
|
|
i++;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return i;
|
|
|
|
}
|
|
|
|
|
2020-11-10 19:26:07 +08:00
|
|
|
/*
 * Report zones of @device starting at byte offset @pos into @zones.
 *
 * On entry *nr_zones holds the capacity of @zones; on success it is updated
 * to the number of zones actually reported. Non-zoned devices are served by
 * emulate_report_zones(), zoned devices first try the per-device zone cache
 * and fall back to blkdev_report_zones(), refreshing the cache afterwards.
 */
static int btrfs_get_dev_zones(struct btrfs_device *device, u64 pos,
			       struct blk_zone *zones, unsigned int *nr_zones)
{
	struct btrfs_zoned_device_info *zdi = device->zone_info;
	int ret;

	if (*nr_zones == 0)
		return 0;

	/* Non-zoned devices get emulated conventional zones. */
	if (!bdev_is_zoned(device->bdev)) {
		*nr_zones = emulate_report_zones(device, pos, zones, *nr_zones);
		return 0;
	}

	/* Try to satisfy the whole request from the zone cache. */
	if (zdi->zone_cache) {
		u32 first;
		unsigned int idx;

		ASSERT(IS_ALIGNED(pos, zdi->zone_size));
		first = pos >> zdi->zone_size_shift;
		/*
		 * We cannot report zones beyond the zone end, so cap the
		 * request at the end of the device.
		 */
		*nr_zones = min_t(u32, *nr_zones, zdi->nr_zones - first);

		/* A cached entry with len == 0 was never populated. */
		for (idx = 0; idx < *nr_zones; idx++) {
			if (!zdi->zone_cache[first + idx].len)
				break;
		}

		if (idx == *nr_zones) {
			/* Cache hit on all the zones */
			memcpy(zones, zdi->zone_cache + first,
			       sizeof(*zdi->zone_cache) * *nr_zones);
			return 0;
		}
	}

	ret = blkdev_report_zones(device->bdev, pos >> SECTOR_SHIFT, *nr_zones,
				  copy_zone_info_cb, zones);
	if (ret < 0) {
		btrfs_err_in_rcu(device->fs_info,
				 "zoned: failed to read zone %llu on %s (devid %llu)",
				 pos, rcu_str_deref(device->name),
				 device->devid);
		return ret;
	}
	*nr_zones = ret;
	/* Reporting zero zones for an in-range position means a bad device. */
	if (!ret)
		return -EIO;

	/* Populate cache with the freshly reported zones. */
	if (zdi->zone_cache) {
		u32 first = pos >> zdi->zone_size_shift;

		memcpy(zdi->zone_cache + first, zones,
		       sizeof(*zdi->zone_cache) * *nr_zones);
	}

	return 0;
}
|
|
|
|
|
2021-02-04 18:21:47 +08:00
|
|
|
/* The emulated zone size is determined from the size of device extent */
|
|
|
|
static int calculate_emulated_zone_size(struct btrfs_fs_info *fs_info)
|
|
|
|
{
|
|
|
|
struct btrfs_path *path;
|
|
|
|
struct btrfs_root *root = fs_info->dev_root;
|
|
|
|
struct btrfs_key key;
|
|
|
|
struct extent_buffer *leaf;
|
|
|
|
struct btrfs_dev_extent *dext;
|
|
|
|
int ret = 0;
|
|
|
|
|
|
|
|
key.objectid = 1;
|
|
|
|
key.type = BTRFS_DEV_EXTENT_KEY;
|
|
|
|
key.offset = 0;
|
|
|
|
|
|
|
|
path = btrfs_alloc_path();
|
|
|
|
if (!path)
|
|
|
|
return -ENOMEM;
|
|
|
|
|
|
|
|
ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
|
|
|
|
if (ret < 0)
|
|
|
|
goto out;
|
|
|
|
|
|
|
|
if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
|
2021-07-13 21:58:03 +08:00
|
|
|
ret = btrfs_next_leaf(root, path);
|
2021-02-04 18:21:47 +08:00
|
|
|
if (ret < 0)
|
|
|
|
goto out;
|
|
|
|
/* No dev extents at all? Not good */
|
|
|
|
if (ret > 0) {
|
|
|
|
ret = -EUCLEAN;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
leaf = path->nodes[0];
|
|
|
|
dext = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_extent);
|
|
|
|
fs_info->zone_size = btrfs_dev_extent_length(leaf, dext);
|
|
|
|
ret = 0;
|
|
|
|
|
|
|
|
out:
|
|
|
|
btrfs_free_path(path);
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2021-02-04 18:21:42 +08:00
|
|
|
int btrfs_get_dev_zone_info_all_devices(struct btrfs_fs_info *fs_info)
|
|
|
|
{
|
|
|
|
struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
|
|
|
|
struct btrfs_device *device;
|
|
|
|
int ret = 0;
|
|
|
|
|
|
|
|
/* fs_info->zone_size might not set yet. Use the incomapt flag here. */
|
|
|
|
if (!btrfs_fs_incompat(fs_info, ZONED))
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
mutex_lock(&fs_devices->device_list_mutex);
|
|
|
|
list_for_each_entry(device, &fs_devices->devices, dev_list) {
|
|
|
|
/* We can skip reading of zone info for missing devices */
|
|
|
|
if (!device->bdev)
|
|
|
|
continue;
|
|
|
|
|
2021-11-11 13:14:38 +08:00
|
|
|
ret = btrfs_get_dev_zone_info(device, true);
|
2021-02-04 18:21:42 +08:00
|
|
|
if (ret)
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
mutex_unlock(&fs_devices->device_list_mutex);
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2021-11-11 13:14:38 +08:00
|
|
|
int btrfs_get_dev_zone_info(struct btrfs_device *device, bool populate_cache)
|
2020-11-10 19:26:07 +08:00
|
|
|
{
|
2021-02-04 18:21:47 +08:00
|
|
|
struct btrfs_fs_info *fs_info = device->fs_info;
|
2020-11-10 19:26:07 +08:00
|
|
|
struct btrfs_zoned_device_info *zone_info = NULL;
|
|
|
|
struct block_device *bdev = device->bdev;
|
2021-08-19 20:19:15 +08:00
|
|
|
unsigned int max_active_zones;
|
|
|
|
unsigned int nactive;
|
2020-11-10 19:26:07 +08:00
|
|
|
sector_t nr_sectors;
|
|
|
|
sector_t sector = 0;
|
|
|
|
struct blk_zone *zones = NULL;
|
|
|
|
unsigned int i, nreported = 0, nr_zones;
|
2021-03-03 16:55:46 +08:00
|
|
|
sector_t zone_sectors;
|
2021-02-04 18:21:47 +08:00
|
|
|
char *model, *emulated;
|
2020-11-10 19:26:07 +08:00
|
|
|
int ret;
|
|
|
|
|
2021-02-04 18:21:47 +08:00
|
|
|
/*
|
|
|
|
* Cannot use btrfs_is_zoned here, since fs_info::zone_size might not
|
|
|
|
* yet be set.
|
|
|
|
*/
|
|
|
|
if (!btrfs_fs_incompat(fs_info, ZONED))
|
2020-11-10 19:26:07 +08:00
|
|
|
return 0;
|
|
|
|
|
|
|
|
if (device->zone_info)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
zone_info = kzalloc(sizeof(*zone_info), GFP_KERNEL);
|
|
|
|
if (!zone_info)
|
|
|
|
return -ENOMEM;
|
|
|
|
|
2021-11-11 13:14:38 +08:00
|
|
|
device->zone_info = zone_info;
|
|
|
|
|
2021-02-04 18:21:47 +08:00
|
|
|
if (!bdev_is_zoned(bdev)) {
|
|
|
|
if (!fs_info->zone_size) {
|
|
|
|
ret = calculate_emulated_zone_size(fs_info);
|
|
|
|
if (ret)
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
|
|
|
ASSERT(fs_info->zone_size);
|
|
|
|
zone_sectors = fs_info->zone_size >> SECTOR_SHIFT;
|
|
|
|
} else {
|
|
|
|
zone_sectors = bdev_zone_sectors(bdev);
|
|
|
|
}
|
|
|
|
|
2022-11-01 03:33:42 +08:00
|
|
|
ASSERT(is_power_of_two_u64(zone_sectors));
|
2020-11-10 19:26:07 +08:00
|
|
|
zone_info->zone_size = zone_sectors << SECTOR_SHIFT;
|
btrfs: zoned: move superblock logging zone location
Moves the location of the superblock logging zones. The new locations of
the logging zones are now determined based on fixed block addresses
instead of on fixed zone numbers.
The old placement method based on fixed zone numbers causes problems when
one needs to inspect a file system image without access to the drive zone
information. In such case, the super block locations cannot be reliably
determined as the zone size is unknown. By locating the superblock logging
zones using fixed addresses, we can scan a dumped file system image without
the zone information since a super block copy will always be present at or
after the fixed known locations.
Introduce the following three pairs of zones containing fixed offset
locations, regardless of the device zone size.
- primary superblock: offset 0B (and the following zone)
- first copy: offset 512G (and the following zone)
- Second copy: offset 4T (4096G, and the following zone)
If a logging zone is outside of the disk capacity, we do not record the
superblock copy.
The first copy position is much larger than for a non-zoned filesystem,
which is at 64M. This is to avoid overlapping with the log zones for
the primary superblock. This higher location is arbitrary but allows
supporting devices with very large zone sizes, plus some space around in
between.
Such large zone size is unrealistic and very unlikely to ever be seen in
real devices. Currently, SMR disks have a zone size of 256MB, and we are
expecting ZNS drives to be in the 1-4GB range, so this limit gives us
room to breathe. For now, we only allow zone sizes up to 8GB. The
maximum zone size that would still fit in the space is 256G.
The fixed location addresses are somewhat arbitrary, with the intent of
maintaining superblock reliability for smaller and larger devices, with
the preference for the latter. For this reason, there are two superblocks
under the first 1T. This should cover use cases for physical devices and
for emulated/device-mapper devices.
The superblock logging zones are reserved for superblock logging and
never used for data or metadata blocks. Note that we only reserve the
two zones per primary/copy actually used for superblock logging. We do
not reserve the ranges of zones possibly containing superblocks with the
largest supported zone size (0-16GB, 512G-528GB, 4096G-4112G).
The zones containing the fixed location offsets used to store
superblocks on a non-zoned volume are also reserved to avoid confusion.
Signed-off-by: Naohiro Aota <naohiro.aota@wdc.com>
Signed-off-by: David Sterba <dsterba@suse.com>
2021-04-08 16:25:28 +08:00
|
|
|
|
|
|
|
/* We reject devices with a zone size larger than 8GB */
|
|
|
|
if (zone_info->zone_size > BTRFS_MAX_ZONE_SIZE) {
|
|
|
|
btrfs_err_in_rcu(fs_info,
|
|
|
|
"zoned: %s: zone size %llu larger than supported maximum %llu",
|
|
|
|
rcu_str_deref(device->name),
|
|
|
|
zone_info->zone_size, BTRFS_MAX_ZONE_SIZE);
|
|
|
|
ret = -EINVAL;
|
|
|
|
goto out;
|
2022-05-13 23:52:52 +08:00
|
|
|
} else if (zone_info->zone_size < BTRFS_MIN_ZONE_SIZE) {
|
|
|
|
btrfs_err_in_rcu(fs_info,
|
|
|
|
"zoned: %s: zone size %llu smaller than supported minimum %u",
|
|
|
|
rcu_str_deref(device->name),
|
|
|
|
zone_info->zone_size, BTRFS_MIN_ZONE_SIZE);
|
|
|
|
ret = -EINVAL;
|
|
|
|
goto out;
|
btrfs: zoned: move superblock logging zone location
Moves the location of the superblock logging zones. The new locations of
the logging zones are now determined based on fixed block addresses
instead of on fixed zone numbers.
The old placement method based on fixed zone numbers causes problems when
one needs to inspect a file system image without access to the drive zone
information. In such case, the super block locations cannot be reliably
determined as the zone size is unknown. By locating the superblock logging
zones using fixed addresses, we can scan a dumped file system image without
the zone information since a super block copy will always be present at or
after the fixed known locations.
Introduce the following three pairs of zones containing fixed offset
locations, regardless of the device zone size.
- primary superblock: offset 0B (and the following zone)
- first copy: offset 512G (and the following zone)
- Second copy: offset 4T (4096G, and the following zone)
If a logging zone is outside of the disk capacity, we do not record the
superblock copy.
The first copy position is much larger than for a non-zoned filesystem,
which is at 64M. This is to avoid overlapping with the log zones for
the primary superblock. This higher location is arbitrary but allows
supporting devices with very large zone sizes, plus some space around in
between.
Such large zone size is unrealistic and very unlikely to ever be seen in
real devices. Currently, SMR disks have a zone size of 256MB, and we are
expecting ZNS drives to be in the 1-4GB range, so this limit gives us
room to breathe. For now, we only allow zone sizes up to 8GB. The
maximum zone size that would still fit in the space is 256G.
The fixed location addresses are somewhat arbitrary, with the intent of
maintaining superblock reliability for smaller and larger devices, with
the preference for the latter. For this reason, there are two superblocks
under the first 1T. This should cover use cases for physical devices and
for emulated/device-mapper devices.
The superblock logging zones are reserved for superblock logging and
never used for data or metadata blocks. Note that we only reserve the
two zones per primary/copy actually used for superblock logging. We do
not reserve the ranges of zones possibly containing superblocks with the
largest supported zone size (0-16GB, 512G-528GB, 4096G-4112G).
The zones containing the fixed location offsets used to store
superblocks on a non-zoned volume are also reserved to avoid confusion.
Signed-off-by: Naohiro Aota <naohiro.aota@wdc.com>
Signed-off-by: David Sterba <dsterba@suse.com>
2021-04-08 16:25:28 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
nr_sectors = bdev_nr_sectors(bdev);
|
2020-11-10 19:26:07 +08:00
|
|
|
zone_info->zone_size_shift = ilog2(zone_info->zone_size);
|
|
|
|
zone_info->nr_zones = nr_sectors >> ilog2(zone_sectors);
|
|
|
|
if (!IS_ALIGNED(nr_sectors, zone_sectors))
|
|
|
|
zone_info->nr_zones++;
|
|
|
|
|
2022-04-15 12:52:39 +08:00
|
|
|
max_active_zones = bdev_max_active_zones(bdev);
|
2021-08-19 20:19:15 +08:00
|
|
|
if (max_active_zones && max_active_zones < BTRFS_MIN_ACTIVE_ZONES) {
|
|
|
|
btrfs_err_in_rcu(fs_info,
|
|
|
|
"zoned: %s: max active zones %u is too small, need at least %u active zones",
|
|
|
|
rcu_str_deref(device->name), max_active_zones,
|
|
|
|
BTRFS_MIN_ACTIVE_ZONES);
|
|
|
|
ret = -EINVAL;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
zone_info->max_active_zones = max_active_zones;
|
|
|
|
|
2020-11-10 19:26:07 +08:00
|
|
|
zone_info->seq_zones = bitmap_zalloc(zone_info->nr_zones, GFP_KERNEL);
|
|
|
|
if (!zone_info->seq_zones) {
|
|
|
|
ret = -ENOMEM;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
|
|
|
zone_info->empty_zones = bitmap_zalloc(zone_info->nr_zones, GFP_KERNEL);
|
|
|
|
if (!zone_info->empty_zones) {
|
|
|
|
ret = -ENOMEM;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
2021-08-19 20:19:15 +08:00
|
|
|
zone_info->active_zones = bitmap_zalloc(zone_info->nr_zones, GFP_KERNEL);
|
|
|
|
if (!zone_info->active_zones) {
|
|
|
|
ret = -ENOMEM;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
2022-11-20 20:43:03 +08:00
|
|
|
zones = kvcalloc(BTRFS_REPORT_NR_ZONES, sizeof(struct blk_zone), GFP_KERNEL);
|
2020-11-10 19:26:07 +08:00
|
|
|
if (!zones) {
|
|
|
|
ret = -ENOMEM;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
2021-11-11 13:14:38 +08:00
|
|
|
/*
|
|
|
|
* Enable zone cache only for a zoned device. On a non-zoned device, we
|
|
|
|
* fill the zone info with emulated CONVENTIONAL zones, so no need to
|
|
|
|
* use the cache.
|
|
|
|
*/
|
|
|
|
if (populate_cache && bdev_is_zoned(device->bdev)) {
|
|
|
|
zone_info->zone_cache = vzalloc(sizeof(struct blk_zone) *
|
|
|
|
zone_info->nr_zones);
|
|
|
|
if (!zone_info->zone_cache) {
|
|
|
|
btrfs_err_in_rcu(device->fs_info,
|
|
|
|
"zoned: failed to allocate zone cache for %s",
|
|
|
|
rcu_str_deref(device->name));
|
|
|
|
ret = -ENOMEM;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-11-10 19:26:07 +08:00
|
|
|
/* Get zones type */
|
2021-08-19 20:19:15 +08:00
|
|
|
nactive = 0;
|
2020-11-10 19:26:07 +08:00
|
|
|
while (sector < nr_sectors) {
|
|
|
|
nr_zones = BTRFS_REPORT_NR_ZONES;
|
|
|
|
ret = btrfs_get_dev_zones(device, sector << SECTOR_SHIFT, zones,
|
|
|
|
&nr_zones);
|
|
|
|
if (ret)
|
|
|
|
goto out;
|
|
|
|
|
|
|
|
for (i = 0; i < nr_zones; i++) {
|
|
|
|
if (zones[i].type == BLK_ZONE_TYPE_SEQWRITE_REQ)
|
|
|
|
__set_bit(nreported, zone_info->seq_zones);
|
2021-08-19 20:19:15 +08:00
|
|
|
switch (zones[i].cond) {
|
|
|
|
case BLK_ZONE_COND_EMPTY:
|
2020-11-10 19:26:07 +08:00
|
|
|
__set_bit(nreported, zone_info->empty_zones);
|
2021-08-19 20:19:15 +08:00
|
|
|
break;
|
|
|
|
case BLK_ZONE_COND_IMP_OPEN:
|
|
|
|
case BLK_ZONE_COND_EXP_OPEN:
|
|
|
|
case BLK_ZONE_COND_CLOSED:
|
|
|
|
__set_bit(nreported, zone_info->active_zones);
|
|
|
|
nactive++;
|
|
|
|
break;
|
|
|
|
}
|
2020-11-10 19:26:07 +08:00
|
|
|
nreported++;
|
|
|
|
}
|
|
|
|
sector = zones[nr_zones - 1].start + zones[nr_zones - 1].len;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (nreported != zone_info->nr_zones) {
|
|
|
|
btrfs_err_in_rcu(device->fs_info,
|
|
|
|
"inconsistent number of zones on %s (%u/%u)",
|
|
|
|
rcu_str_deref(device->name), nreported,
|
|
|
|
zone_info->nr_zones);
|
|
|
|
ret = -EIO;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
2021-08-19 20:19:15 +08:00
|
|
|
if (max_active_zones) {
|
|
|
|
if (nactive > max_active_zones) {
|
|
|
|
btrfs_err_in_rcu(device->fs_info,
|
|
|
|
"zoned: %u active zones on %s exceeds max_active_zones %u",
|
|
|
|
nactive, rcu_str_deref(device->name),
|
|
|
|
max_active_zones);
|
|
|
|
ret = -EIO;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
atomic_set(&zone_info->active_zones_left,
|
|
|
|
max_active_zones - nactive);
|
2023-03-02 05:14:42 +08:00
|
|
|
set_bit(BTRFS_FS_ACTIVE_ZONE_TRACKING, &fs_info->flags);
|
2021-08-19 20:19:15 +08:00
|
|
|
}
|
|
|
|
|
btrfs: implement log-structured superblock for ZONED mode
Superblock (and its copies) is the only data structure in btrfs which
has a fixed location on a device. Since we cannot overwrite in a
sequential write required zone, we cannot place superblock in the zone.
One easy solution is limiting superblock and copies to be placed only in
conventional zones. However, this method has two downsides: one is
reduced number of superblock copies. The location of the second copy of
superblock is 256GB, which is in a sequential write required zone on
typical devices in the market today. So, the number of superblock and
copies is limited to be two. Second downside is that we cannot support
devices which have no conventional zones at all.
To solve these two problems, we employ superblock log writing. It uses
two adjacent zones as a circular buffer to write updated superblocks.
Once the first zone is filled up, start writing into the second one.
Then, when both zones are filled up and before starting to write to the
first zone again, it reset the first zone.
We can determine the position of the latest superblock by reading write
pointer information from a device. One corner case is when both zones
are full. For this situation, we read out the last superblock of each
zone, and compare them to determine which zone is older.
The following zones are reserved as the circular buffer on ZONED btrfs.
- The primary superblock: zones 0 and 1
- The first copy: zones 16 and 17
- The second copy: zones 1024 or zone at 256GB which is minimum, and
next to it
If these reserved zones are conventional, superblock is written fixed at
the start of the zone without logging.
Signed-off-by: Naohiro Aota <naohiro.aota@wdc.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
2020-11-10 19:26:14 +08:00
|
|
|
/* Validate superblock log */
|
|
|
|
nr_zones = BTRFS_NR_SB_LOG_ZONES;
|
|
|
|
for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
|
|
|
|
u32 sb_zone;
|
|
|
|
u64 sb_wp;
|
|
|
|
int sb_pos = BTRFS_NR_SB_LOG_ZONES * i;
|
|
|
|
|
|
|
|
sb_zone = sb_zone_number(zone_info->zone_size_shift, i);
|
|
|
|
if (sb_zone + 1 >= zone_info->nr_zones)
|
|
|
|
continue;
|
|
|
|
|
2021-05-27 14:27:32 +08:00
|
|
|
ret = btrfs_get_dev_zones(device,
|
|
|
|
zone_start_physical(sb_zone, zone_info),
|
btrfs: implement log-structured superblock for ZONED mode
Superblock (and its copies) is the only data structure in btrfs which
has a fixed location on a device. Since we cannot overwrite in a
sequential write required zone, we cannot place superblock in the zone.
One easy solution is limiting superblock and copies to be placed only in
conventional zones. However, this method has two downsides: one is
reduced number of superblock copies. The location of the second copy of
superblock is 256GB, which is in a sequential write required zone on
typical devices in the market today. So, the number of superblock and
copies is limited to be two. Second downside is that we cannot support
devices which have no conventional zones at all.
To solve these two problems, we employ superblock log writing. It uses
two adjacent zones as a circular buffer to write updated superblocks.
Once the first zone is filled up, start writing into the second one.
Then, when both zones are filled up and before starting to write to the
first zone again, it reset the first zone.
We can determine the position of the latest superblock by reading write
pointer information from a device. One corner case is when both zones
are full. For this situation, we read out the last superblock of each
zone, and compare them to determine which zone is older.
The following zones are reserved as the circular buffer on ZONED btrfs.
- The primary superblock: zones 0 and 1
- The first copy: zones 16 and 17
- The second copy: zones 1024 or zone at 256GB which is minimum, and
next to it
If these reserved zones are conventional, superblock is written fixed at
the start of the zone without logging.
Signed-off-by: Naohiro Aota <naohiro.aota@wdc.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
2020-11-10 19:26:14 +08:00
|
|
|
&zone_info->sb_zones[sb_pos],
|
|
|
|
&nr_zones);
|
|
|
|
if (ret)
|
|
|
|
goto out;
|
|
|
|
|
|
|
|
if (nr_zones != BTRFS_NR_SB_LOG_ZONES) {
|
|
|
|
btrfs_err_in_rcu(device->fs_info,
|
|
|
|
"zoned: failed to read super block log zone info at devid %llu zone %u",
|
|
|
|
device->devid, sb_zone);
|
|
|
|
ret = -EUCLEAN;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2021-05-21 23:42:23 +08:00
|
|
|
* If zones[0] is conventional, always use the beginning of the
|
btrfs: implement log-structured superblock for ZONED mode
Superblock (and its copies) is the only data structure in btrfs which
has a fixed location on a device. Since we cannot overwrite in a
sequential write required zone, we cannot place superblock in the zone.
One easy solution is limiting superblock and copies to be placed only in
conventional zones. However, this method has two downsides: one is
reduced number of superblock copies. The location of the second copy of
superblock is 256GB, which is in a sequential write required zone on
typical devices in the market today. So, the number of superblock and
copies is limited to be two. Second downside is that we cannot support
devices which have no conventional zones at all.
To solve these two problems, we employ superblock log writing. It uses
two adjacent zones as a circular buffer to write updated superblocks.
Once the first zone is filled up, start writing into the second one.
Then, when both zones are filled up and before starting to write to the
first zone again, it reset the first zone.
We can determine the position of the latest superblock by reading write
pointer information from a device. One corner case is when both zones
are full. For this situation, we read out the last superblock of each
zone, and compare them to determine which zone is older.
The following zones are reserved as the circular buffer on ZONED btrfs.
- The primary superblock: zones 0 and 1
- The first copy: zones 16 and 17
- The second copy: zones 1024 or zone at 256GB which is minimum, and
next to it
If these reserved zones are conventional, superblock is written fixed at
the start of the zone without logging.
Signed-off-by: Naohiro Aota <naohiro.aota@wdc.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
2020-11-10 19:26:14 +08:00
|
|
|
* zone to record superblock. No need to validate in that case.
|
|
|
|
*/
|
|
|
|
if (zone_info->sb_zones[BTRFS_NR_SB_LOG_ZONES * i].type ==
|
|
|
|
BLK_ZONE_TYPE_CONVENTIONAL)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
ret = sb_write_pointer(device->bdev,
|
|
|
|
&zone_info->sb_zones[sb_pos], &sb_wp);
|
|
|
|
if (ret != -ENOENT && ret) {
|
|
|
|
btrfs_err_in_rcu(device->fs_info,
|
|
|
|
"zoned: super block log zone corrupted devid %llu zone %u",
|
|
|
|
device->devid, sb_zone);
|
|
|
|
ret = -EUCLEAN;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2022-11-20 20:43:03 +08:00
|
|
|
kvfree(zones);
|
2020-11-10 19:26:07 +08:00
|
|
|
|
2021-02-04 18:21:47 +08:00
|
|
|
switch (bdev_zoned_model(bdev)) {
|
|
|
|
case BLK_ZONED_HM:
|
|
|
|
model = "host-managed zoned";
|
|
|
|
emulated = "";
|
|
|
|
break;
|
|
|
|
case BLK_ZONED_HA:
|
|
|
|
model = "host-aware zoned";
|
|
|
|
emulated = "";
|
|
|
|
break;
|
|
|
|
case BLK_ZONED_NONE:
|
|
|
|
model = "regular";
|
|
|
|
emulated = "emulated ";
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
/* Just in case */
|
|
|
|
btrfs_err_in_rcu(fs_info, "zoned: unsupported model %d on %s",
|
|
|
|
bdev_zoned_model(bdev),
|
|
|
|
rcu_str_deref(device->name));
|
|
|
|
ret = -EOPNOTSUPP;
|
|
|
|
goto out_free_zone_info;
|
|
|
|
}
|
|
|
|
|
|
|
|
btrfs_info_in_rcu(fs_info,
|
|
|
|
"%s block device %s, %u %szones of %llu bytes",
|
|
|
|
model, rcu_str_deref(device->name), zone_info->nr_zones,
|
|
|
|
emulated, zone_info->zone_size);
|
2020-11-10 19:26:07 +08:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
out:
|
2022-11-20 20:43:03 +08:00
|
|
|
kvfree(zones);
|
2021-02-04 18:21:47 +08:00
|
|
|
out_free_zone_info:
|
2021-11-11 13:14:38 +08:00
|
|
|
btrfs_destroy_dev_zone_info(device);
|
2020-11-10 19:26:07 +08:00
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
void btrfs_destroy_dev_zone_info(struct btrfs_device *device)
|
|
|
|
{
|
|
|
|
struct btrfs_zoned_device_info *zone_info = device->zone_info;
|
|
|
|
|
|
|
|
if (!zone_info)
|
|
|
|
return;
|
|
|
|
|
2021-08-19 20:19:15 +08:00
|
|
|
bitmap_free(zone_info->active_zones);
|
2020-11-10 19:26:07 +08:00
|
|
|
bitmap_free(zone_info->seq_zones);
|
|
|
|
bitmap_free(zone_info->empty_zones);
|
2021-11-11 13:14:38 +08:00
|
|
|
vfree(zone_info->zone_cache);
|
2020-11-10 19:26:07 +08:00
|
|
|
kfree(zone_info);
|
|
|
|
device->zone_info = NULL;
|
|
|
|
}
|
|
|
|
|
2022-11-04 22:12:33 +08:00
|
|
|
struct btrfs_zoned_device_info *btrfs_clone_dev_zone_info(struct btrfs_device *orig_dev)
|
|
|
|
{
|
|
|
|
struct btrfs_zoned_device_info *zone_info;
|
|
|
|
|
|
|
|
zone_info = kmemdup(orig_dev->zone_info, sizeof(*zone_info), GFP_KERNEL);
|
|
|
|
if (!zone_info)
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
zone_info->seq_zones = bitmap_zalloc(zone_info->nr_zones, GFP_KERNEL);
|
|
|
|
if (!zone_info->seq_zones)
|
|
|
|
goto out;
|
|
|
|
|
|
|
|
bitmap_copy(zone_info->seq_zones, orig_dev->zone_info->seq_zones,
|
|
|
|
zone_info->nr_zones);
|
|
|
|
|
|
|
|
zone_info->empty_zones = bitmap_zalloc(zone_info->nr_zones, GFP_KERNEL);
|
|
|
|
if (!zone_info->empty_zones)
|
|
|
|
goto out;
|
|
|
|
|
|
|
|
bitmap_copy(zone_info->empty_zones, orig_dev->zone_info->empty_zones,
|
|
|
|
zone_info->nr_zones);
|
|
|
|
|
|
|
|
zone_info->active_zones = bitmap_zalloc(zone_info->nr_zones, GFP_KERNEL);
|
|
|
|
if (!zone_info->active_zones)
|
|
|
|
goto out;
|
|
|
|
|
|
|
|
bitmap_copy(zone_info->active_zones, orig_dev->zone_info->active_zones,
|
|
|
|
zone_info->nr_zones);
|
|
|
|
zone_info->zone_cache = NULL;
|
|
|
|
|
|
|
|
return zone_info;
|
|
|
|
|
|
|
|
out:
|
|
|
|
bitmap_free(zone_info->seq_zones);
|
|
|
|
bitmap_free(zone_info->empty_zones);
|
|
|
|
bitmap_free(zone_info->active_zones);
|
|
|
|
kfree(zone_info);
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2020-11-10 19:26:07 +08:00
|
|
|
/*
 * Report the single zone covering device offset @pos into @zone.
 *
 * Returns 0 on success, the error from btrfs_get_dev_zones() on failure,
 * or -EIO when the device reported no zone for the position.
 */
int btrfs_get_dev_zone(struct btrfs_device *device, u64 pos,
		       struct blk_zone *zone)
{
	unsigned int nr_zones = 1;
	int ret;

	ret = btrfs_get_dev_zones(device, pos, zone, &nr_zones);
	if (ret)
		return ret;
	if (!nr_zones)
		return -EIO;

	return 0;
}
|
2020-11-10 19:26:08 +08:00
|
|
|
|
2022-09-07 17:22:14 +08:00
|
|
|
static int btrfs_check_for_zoned_device(struct btrfs_fs_info *fs_info)
|
|
|
|
{
|
|
|
|
struct btrfs_device *device;
|
|
|
|
|
|
|
|
list_for_each_entry(device, &fs_info->fs_devices->devices, dev_list) {
|
|
|
|
if (device->bdev &&
|
|
|
|
bdev_zoned_model(device->bdev) == BLK_ZONED_HM) {
|
|
|
|
btrfs_err(fs_info,
|
|
|
|
"zoned: mode not enabled but zoned device found: %pg",
|
|
|
|
device->bdev);
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2020-11-10 19:26:08 +08:00
|
|
|
int btrfs_check_zoned_mode(struct btrfs_fs_info *fs_info)
|
|
|
|
{
|
2023-01-21 14:50:29 +08:00
|
|
|
struct queue_limits *lim = &fs_info->limits;
|
2020-11-10 19:26:08 +08:00
|
|
|
struct btrfs_device *device;
|
|
|
|
u64 zone_size = 0;
|
2022-09-07 17:22:14 +08:00
|
|
|
int ret;
|
2020-11-10 19:26:08 +08:00
|
|
|
|
2022-09-07 17:22:14 +08:00
|
|
|
/*
|
|
|
|
* Host-Managed devices can't be used without the ZONED flag. With the
|
|
|
|
* ZONED all devices can be used, using zone emulation if required.
|
|
|
|
*/
|
|
|
|
if (!btrfs_fs_incompat(fs_info, ZONED))
|
|
|
|
return btrfs_check_for_zoned_device(fs_info);
|
|
|
|
|
2023-01-21 14:50:29 +08:00
|
|
|
blk_set_stacking_limits(lim);
|
|
|
|
|
2022-09-07 17:22:14 +08:00
|
|
|
list_for_each_entry(device, &fs_info->fs_devices->devices, dev_list) {
|
|
|
|
struct btrfs_zoned_device_info *zone_info = device->zone_info;
|
2020-11-10 19:26:08 +08:00
|
|
|
|
|
|
|
if (!device->bdev)
|
|
|
|
continue;
|
|
|
|
|
2022-09-07 17:22:14 +08:00
|
|
|
if (!zone_size) {
|
|
|
|
zone_size = zone_info->zone_size;
|
|
|
|
} else if (zone_info->zone_size != zone_size) {
|
|
|
|
btrfs_err(fs_info,
|
2020-11-10 19:26:08 +08:00
|
|
|
"zoned: unequal block device zone sizes: have %llu found %llu",
|
2022-09-07 17:22:14 +08:00
|
|
|
zone_info->zone_size, zone_size);
|
|
|
|
return -EINVAL;
|
2020-11-10 19:26:08 +08:00
|
|
|
}
|
2023-01-21 14:50:29 +08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* With the zoned emulation, we can have non-zoned device on the
|
|
|
|
* zoned mode. In this case, we don't have a valid max zone
|
|
|
|
* append size.
|
|
|
|
*/
|
|
|
|
if (bdev_is_zoned(device->bdev)) {
|
|
|
|
blk_stack_limits(lim,
|
|
|
|
&bdev_get_queue(device->bdev)->limits,
|
|
|
|
0);
|
|
|
|
}
|
2020-11-10 19:26:08 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* stripe_size is always aligned to BTRFS_STRIPE_LEN in
|
2021-08-18 18:41:19 +08:00
|
|
|
* btrfs_create_chunk(). Since we want stripe_len == zone_size,
|
2020-11-10 19:26:08 +08:00
|
|
|
* check the alignment here.
|
|
|
|
*/
|
|
|
|
if (!IS_ALIGNED(zone_size, BTRFS_STRIPE_LEN)) {
|
|
|
|
btrfs_err(fs_info,
|
|
|
|
"zoned: zone size %llu not aligned to stripe %u",
|
|
|
|
zone_size, BTRFS_STRIPE_LEN);
|
2022-09-07 17:22:14 +08:00
|
|
|
return -EINVAL;
|
2020-11-10 19:26:08 +08:00
|
|
|
}
|
|
|
|
|
2020-11-10 19:26:13 +08:00
|
|
|
if (btrfs_fs_incompat(fs_info, MIXED_GROUPS)) {
|
|
|
|
btrfs_err(fs_info, "zoned: mixed block groups not supported");
|
2022-09-07 17:22:14 +08:00
|
|
|
return -EINVAL;
|
2020-11-10 19:26:13 +08:00
|
|
|
}
|
|
|
|
|
2020-11-10 19:26:08 +08:00
|
|
|
fs_info->zone_size = zone_size;
|
2023-01-21 14:50:29 +08:00
|
|
|
/*
|
|
|
|
* Also limit max_zone_append_size by max_segments * PAGE_SIZE.
|
|
|
|
* Technically, we can have multiple pages per segment. But, since
|
|
|
|
* we add the pages one by one to a bio, and cannot increase the
|
|
|
|
* metadata reservation even if it increases the number of extents, it
|
|
|
|
* is safe to stick with the limit.
|
|
|
|
*/
|
|
|
|
fs_info->max_zone_append_size = ALIGN_DOWN(
|
|
|
|
min3((u64)lim->max_zone_append_sectors << SECTOR_SHIFT,
|
|
|
|
(u64)lim->max_sectors << SECTOR_SHIFT,
|
|
|
|
(u64)lim->max_segments << PAGE_SHIFT),
|
|
|
|
fs_info->sectorsize);
|
2021-02-04 18:21:48 +08:00
|
|
|
fs_info->fs_devices->chunk_alloc_policy = BTRFS_CHUNK_ALLOC_ZONED;
|
btrfs: replace BTRFS_MAX_EXTENT_SIZE with fs_info->max_extent_size
On zoned filesystem, data write out is limited by max_zone_append_size,
and a large ordered extent is split according the size of a bio. OTOH,
the number of extents to be written is calculated using
BTRFS_MAX_EXTENT_SIZE, and that estimated number is used to reserve the
metadata bytes to update and/or create the metadata items.
The metadata reservation is done at e.g, btrfs_buffered_write() and then
released according to the estimation changes. Thus, if the number of extent
increases massively, the reserved metadata can run out.
The increase of the number of extents easily occurs on zoned filesystem
if BTRFS_MAX_EXTENT_SIZE > max_zone_append_size. And, it causes the
following warning on a small RAM environment with disabling metadata
over-commit (in the following patch).
[75721.498492] ------------[ cut here ]------------
[75721.505624] BTRFS: block rsv 1 returned -28
[75721.512230] WARNING: CPU: 24 PID: 2327559 at fs/btrfs/block-rsv.c:537 btrfs_use_block_rsv+0x560/0x760 [btrfs]
[75721.581854] CPU: 24 PID: 2327559 Comm: kworker/u64:10 Kdump: loaded Tainted: G W 5.18.0-rc2-BTRFS-ZNS+ #109
[75721.597200] Hardware name: Supermicro Super Server/H12SSL-NT, BIOS 2.0 02/22/2021
[75721.607310] Workqueue: btrfs-endio-write btrfs_work_helper [btrfs]
[75721.616209] RIP: 0010:btrfs_use_block_rsv+0x560/0x760 [btrfs]
[75721.646649] RSP: 0018:ffffc9000fbdf3e0 EFLAGS: 00010286
[75721.654126] RAX: 0000000000000000 RBX: 0000000000004000 RCX: 0000000000000000
[75721.663524] RDX: 0000000000000004 RSI: 0000000000000008 RDI: fffff52001f7be6e
[75721.672921] RBP: ffffc9000fbdf420 R08: 0000000000000001 R09: ffff889f8d1fc6c7
[75721.682493] R10: ffffed13f1a3f8d8 R11: 0000000000000001 R12: ffff88980a3c0e28
[75721.692284] R13: ffff889b66590000 R14: ffff88980a3c0e40 R15: ffff88980a3c0e8a
[75721.701878] FS: 0000000000000000(0000) GS:ffff889f8d000000(0000) knlGS:0000000000000000
[75721.712601] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
[75721.720726] CR2: 000055d12e05c018 CR3: 0000800193594000 CR4: 0000000000350ee0
[75721.730499] Call Trace:
[75721.735166] <TASK>
[75721.739886] btrfs_alloc_tree_block+0x1e1/0x1100 [btrfs]
[75721.747545] ? btrfs_alloc_logged_file_extent+0x550/0x550 [btrfs]
[75721.756145] ? btrfs_get_32+0xea/0x2d0 [btrfs]
[75721.762852] ? btrfs_get_32+0xea/0x2d0 [btrfs]
[75721.769520] ? push_leaf_left+0x420/0x620 [btrfs]
[75721.776431] ? memcpy+0x4e/0x60
[75721.781931] split_leaf+0x433/0x12d0 [btrfs]
[75721.788392] ? btrfs_get_token_32+0x580/0x580 [btrfs]
[75721.795636] ? push_for_double_split.isra.0+0x420/0x420 [btrfs]
[75721.803759] ? leaf_space_used+0x15d/0x1a0 [btrfs]
[75721.811156] btrfs_search_slot+0x1bc3/0x2790 [btrfs]
[75721.818300] ? lock_downgrade+0x7c0/0x7c0
[75721.824411] ? free_extent_buffer.part.0+0x107/0x200 [btrfs]
[75721.832456] ? split_leaf+0x12d0/0x12d0 [btrfs]
[75721.839149] ? free_extent_buffer.part.0+0x14f/0x200 [btrfs]
[75721.846945] ? free_extent_buffer+0x13/0x20 [btrfs]
[75721.853960] ? btrfs_release_path+0x4b/0x190 [btrfs]
[75721.861429] btrfs_csum_file_blocks+0x85c/0x1500 [btrfs]
[75721.869313] ? rcu_read_lock_sched_held+0x16/0x80
[75721.876085] ? lock_release+0x552/0xf80
[75721.881957] ? btrfs_del_csums+0x8c0/0x8c0 [btrfs]
[75721.888886] ? __kasan_check_write+0x14/0x20
[75721.895152] ? do_raw_read_unlock+0x44/0x80
[75721.901323] ? _raw_write_lock_irq+0x60/0x80
[75721.907983] ? btrfs_global_root+0xb9/0xe0 [btrfs]
[75721.915166] ? btrfs_csum_root+0x12b/0x180 [btrfs]
[75721.921918] ? btrfs_get_global_root+0x820/0x820 [btrfs]
[75721.929166] ? _raw_write_unlock+0x23/0x40
[75721.935116] ? unpin_extent_cache+0x1e3/0x390 [btrfs]
[75721.942041] btrfs_finish_ordered_io.isra.0+0xa0c/0x1dc0 [btrfs]
[75721.949906] ? try_to_wake_up+0x30/0x14a0
[75721.955700] ? btrfs_unlink_subvol+0xda0/0xda0 [btrfs]
[75721.962661] ? rcu_read_lock_sched_held+0x16/0x80
[75721.969111] ? lock_acquire+0x41b/0x4c0
[75721.974982] finish_ordered_fn+0x15/0x20 [btrfs]
[75721.981639] btrfs_work_helper+0x1af/0xa80 [btrfs]
[75721.988184] ? _raw_spin_unlock_irq+0x28/0x50
[75721.994643] process_one_work+0x815/0x1460
[75722.000444] ? pwq_dec_nr_in_flight+0x250/0x250
[75722.006643] ? do_raw_spin_trylock+0xbb/0x190
[75722.013086] worker_thread+0x59a/0xeb0
[75722.018511] kthread+0x2ac/0x360
[75722.023428] ? process_one_work+0x1460/0x1460
[75722.029431] ? kthread_complete_and_exit+0x30/0x30
[75722.036044] ret_from_fork+0x22/0x30
[75722.041255] </TASK>
[75722.045047] irq event stamp: 0
[75722.049703] hardirqs last enabled at (0): [<0000000000000000>] 0x0
[75722.057610] hardirqs last disabled at (0): [<ffffffff8118a94a>] copy_process+0x1c1a/0x66b0
[75722.067533] softirqs last enabled at (0): [<ffffffff8118a989>] copy_process+0x1c59/0x66b0
[75722.077423] softirqs last disabled at (0): [<0000000000000000>] 0x0
[75722.085335] ---[ end trace 0000000000000000 ]---
To fix the estimation, we need to introduce fs_info->max_extent_size to
replace BTRFS_MAX_EXTENT_SIZE, which allow setting the different size for
regular vs zoned filesystem.
Set fs_info->max_extent_size to BTRFS_MAX_EXTENT_SIZE by default. On zoned
filesystem, it is set to fs_info->max_zone_append_size.
CC: stable@vger.kernel.org # 5.12+
Fixes: d8e3fb106f39 ("btrfs: zoned: use ZONE_APPEND write for zoned mode")
Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
Signed-off-by: Naohiro Aota <naohiro.aota@wdc.com>
Signed-off-by: David Sterba <dsterba@suse.com>
2022-07-09 07:18:40 +08:00
|
|
|
if (fs_info->max_zone_append_size < fs_info->max_extent_size)
|
|
|
|
fs_info->max_extent_size = fs_info->max_zone_append_size;
|
2020-11-10 19:26:08 +08:00
|
|
|
|
2021-02-04 18:21:45 +08:00
|
|
|
/*
|
|
|
|
* Check mount options here, because we might change fs_info->zoned
|
|
|
|
* from fs_info->zone_size.
|
|
|
|
*/
|
|
|
|
ret = btrfs_check_mountopts_zoned(fs_info);
|
|
|
|
if (ret)
|
2022-09-07 17:22:14 +08:00
|
|
|
return ret;
|
2021-02-04 18:21:45 +08:00
|
|
|
|
2020-11-10 19:26:08 +08:00
|
|
|
btrfs_info(fs_info, "zoned mode enabled with zone size %llu", zone_size);
|
2022-09-07 17:22:14 +08:00
|
|
|
return 0;
|
2020-11-10 19:26:08 +08:00
|
|
|
}
|
2020-11-10 19:26:10 +08:00
|
|
|
|
|
|
|
int btrfs_check_mountopts_zoned(struct btrfs_fs_info *info)
|
|
|
|
{
|
|
|
|
if (!btrfs_is_zoned(info))
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Space cache writing is not COWed. Disable that to avoid write errors
|
|
|
|
* in sequential zones.
|
|
|
|
*/
|
|
|
|
if (btrfs_test_opt(info, SPACE_CACHE)) {
|
|
|
|
btrfs_err(info, "zoned: space cache v1 is not supported");
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
2020-11-10 19:26:11 +08:00
|
|
|
if (btrfs_test_opt(info, NODATACOW)) {
|
|
|
|
btrfs_err(info, "zoned: NODATACOW not supported");
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
2020-11-10 19:26:10 +08:00
|
|
|
return 0;
|
|
|
|
}
|
btrfs: implement log-structured superblock for ZONED mode
Superblock (and its copies) is the only data structure in btrfs which
has a fixed location on a device. Since we cannot overwrite in a
sequential write required zone, we cannot place superblock in the zone.
One easy solution is limiting superblock and copies to be placed only in
conventional zones. However, this method has two downsides: one is
reduced number of superblock copies. The location of the second copy of
superblock is 256GB, which is in a sequential write required zone on
typical devices in the market today. So, the number of superblock and
copies is limited to be two. Second downside is that we cannot support
devices which have no conventional zones at all.
To solve these two problems, we employ superblock log writing. It uses
two adjacent zones as a circular buffer to write updated superblocks.
Once the first zone is filled up, start writing into the second one.
Then, when both zones are filled up and before starting to write to the
first zone again, it reset the first zone.
We can determine the position of the latest superblock by reading write
pointer information from a device. One corner case is when both zones
are full. For this situation, we read out the last superblock of each
zone, and compare them to determine which zone is older.
The following zones are reserved as the circular buffer on ZONED btrfs.
- The primary superblock: zones 0 and 1
- The first copy: zones 16 and 17
- The second copy: zones 1024 or zone at 256GB which is minimum, and
next to it
If these reserved zones are conventional, superblock is written fixed at
the start of the zone without logging.
Signed-off-by: Naohiro Aota <naohiro.aota@wdc.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
2020-11-10 19:26:14 +08:00
|
|
|
|
|
|
|
/*
 * Determine the byte offset for reading or writing a superblock in a
 * two-zone superblock log.
 *
 * @bdev:	the zoned block device
 * @zones:	the two zones forming the circular superblock log
 * @rw:		READ to locate the latest written copy, WRITE to locate the
 *		next slot (resetting a fully written zone when needed)
 * @bytenr_ret:	resulting byte offset on the device
 *
 * For a conventional zones[0] the superblock sits at a fixed location at
 * the start of the zone.  Otherwise the position is derived from the write
 * pointer reported by sb_write_pointer().
 *
 * Returns 0 on success or a negative errno.
 */
static int sb_log_location(struct block_device *bdev, struct blk_zone *zones,
			   int rw, u64 *bytenr_ret)
{
	u64 wp;
	int ret;

	/* Conventional zone: fixed location, no log handling needed. */
	if (zones[0].type == BLK_ZONE_TYPE_CONVENTIONAL) {
		*bytenr_ret = zones[0].start << SECTOR_SHIFT;
		return 0;
	}

	/* -ENOENT (no superblock written yet) is handled below, not fatal. */
	ret = sb_write_pointer(bdev, zones, &wp);
	if (ret != -ENOENT && ret < 0)
		return ret;

	if (rw == WRITE) {
		struct blk_zone *reset = NULL;

		/*
		 * If the write pointer wrapped to the start of a zone, that
		 * zone is about to be rewritten and must be empty first.
		 */
		if (wp == zones[0].start << SECTOR_SHIFT)
			reset = &zones[0];
		else if (wp == zones[1].start << SECTOR_SHIFT)
			reset = &zones[1];

		if (reset && reset->cond != BLK_ZONE_COND_EMPTY) {
			ASSERT(sb_zone_is_full(reset));

			ret = blkdev_zone_mgmt(bdev, REQ_OP_ZONE_RESET,
					       reset->start, reset->len,
					       GFP_NOFS);
			if (ret)
				return ret;

			/* Keep the cached zone state in sync with the device. */
			reset->cond = BLK_ZONE_COND_EMPTY;
			reset->wp = reset->start;
		}
	} else if (ret != -ENOENT) {
		/*
		 * For READ, we want the previous one. Move write pointer to
		 * the end of a zone, if it is at the head of a zone.
		 */
		u64 zone_end = 0;

		if (wp == zones[0].start << SECTOR_SHIFT)
			zone_end = zones[1].start + zones[1].capacity;
		else if (wp == zones[1].start << SECTOR_SHIFT)
			zone_end = zones[0].start + zones[0].capacity;
		if (zone_end)
			wp = ALIGN_DOWN(zone_end << SECTOR_SHIFT,
					BTRFS_SUPER_INFO_SIZE);

		/* Step back one slot to land on the latest written copy. */
		wp -= BTRFS_SUPER_INFO_SIZE;
	}

	*bytenr_ret = wp;
	return 0;
}
|
|
|
|
|
|
|
|
int btrfs_sb_log_location_bdev(struct block_device *bdev, int mirror, int rw,
|
|
|
|
u64 *bytenr_ret)
|
|
|
|
{
|
|
|
|
struct blk_zone zones[BTRFS_NR_SB_LOG_ZONES];
|
2021-03-03 16:55:46 +08:00
|
|
|
sector_t zone_sectors;
|
btrfs: implement log-structured superblock for ZONED mode
Superblock (and its copies) is the only data structure in btrfs which
has a fixed location on a device. Since we cannot overwrite in a
sequential write required zone, we cannot place superblock in the zone.
One easy solution is limiting superblock and copies to be placed only in
conventional zones. However, this method has two downsides: one is
reduced number of superblock copies. The location of the second copy of
superblock is 256GB, which is in a sequential write required zone on
typical devices in the market today. So, the number of superblock and
copies is limited to be two. Second downside is that we cannot support
devices which have no conventional zones at all.
To solve these two problems, we employ superblock log writing. It uses
two adjacent zones as a circular buffer to write updated superblocks.
Once the first zone is filled up, start writing into the second one.
Then, when both zones are filled up and before starting to write to the
first zone again, it reset the first zone.
We can determine the position of the latest superblock by reading write
pointer information from a device. One corner case is when both zones
are full. For this situation, we read out the last superblock of each
zone, and compare them to determine which zone is older.
The following zones are reserved as the circular buffer on ZONED btrfs.
- The primary superblock: zones 0 and 1
- The first copy: zones 16 and 17
- The second copy: zones 1024 or zone at 256GB which is minimum, and
next to it
If these reserved zones are conventional, superblock is written fixed at
the start of the zone without logging.
Signed-off-by: Naohiro Aota <naohiro.aota@wdc.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
2020-11-10 19:26:14 +08:00
|
|
|
u32 sb_zone;
|
|
|
|
int ret;
|
|
|
|
u8 zone_sectors_shift;
|
|
|
|
sector_t nr_sectors;
|
|
|
|
u32 nr_zones;
|
|
|
|
|
|
|
|
if (!bdev_is_zoned(bdev)) {
|
|
|
|
*bytenr_ret = btrfs_sb_offset(mirror);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
ASSERT(rw == READ || rw == WRITE);
|
|
|
|
|
|
|
|
zone_sectors = bdev_zone_sectors(bdev);
|
|
|
|
if (!is_power_of_2(zone_sectors))
|
|
|
|
return -EINVAL;
|
|
|
|
zone_sectors_shift = ilog2(zone_sectors);
|
2020-12-17 04:57:51 +08:00
|
|
|
nr_sectors = bdev_nr_sectors(bdev);
|
btrfs: implement log-structured superblock for ZONED mode
Superblock (and its copies) is the only data structure in btrfs which
has a fixed location on a device. Since we cannot overwrite in a
sequential write required zone, we cannot place superblock in the zone.
One easy solution is limiting superblock and copies to be placed only in
conventional zones. However, this method has two downsides: one is
reduced number of superblock copies. The location of the second copy of
superblock is 256GB, which is in a sequential write required zone on
typical devices in the market today. So, the number of superblock and
copies is limited to be two. Second downside is that we cannot support
devices which have no conventional zones at all.
To solve these two problems, we employ superblock log writing. It uses
two adjacent zones as a circular buffer to write updated superblocks.
Once the first zone is filled up, start writing into the second one.
Then, when both zones are filled up and before starting to write to the
first zone again, it reset the first zone.
We can determine the position of the latest superblock by reading write
pointer information from a device. One corner case is when both zones
are full. For this situation, we read out the last superblock of each
zone, and compare them to determine which zone is older.
The following zones are reserved as the circular buffer on ZONED btrfs.
- The primary superblock: zones 0 and 1
- The first copy: zones 16 and 17
- The second copy: zones 1024 or zone at 256GB which is minimum, and
next to it
If these reserved zones are conventional, superblock is written fixed at
the start of the zone without logging.
Signed-off-by: Naohiro Aota <naohiro.aota@wdc.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
2020-11-10 19:26:14 +08:00
|
|
|
nr_zones = nr_sectors >> zone_sectors_shift;
|
|
|
|
|
|
|
|
sb_zone = sb_zone_number(zone_sectors_shift + SECTOR_SHIFT, mirror);
|
|
|
|
if (sb_zone + 1 >= nr_zones)
|
|
|
|
return -ENOENT;
|
|
|
|
|
2021-05-27 14:27:32 +08:00
|
|
|
ret = blkdev_report_zones(bdev, zone_start_sector(sb_zone, bdev),
|
btrfs: implement log-structured superblock for ZONED mode
Superblock (and its copies) is the only data structure in btrfs which
has a fixed location on a device. Since we cannot overwrite in a
sequential write required zone, we cannot place superblock in the zone.
One easy solution is limiting superblock and copies to be placed only in
conventional zones. However, this method has two downsides: one is
reduced number of superblock copies. The location of the second copy of
superblock is 256GB, which is in a sequential write required zone on
typical devices in the market today. So, the number of superblock and
copies is limited to be two. Second downside is that we cannot support
devices which have no conventional zones at all.
To solve these two problems, we employ superblock log writing. It uses
two adjacent zones as a circular buffer to write updated superblocks.
Once the first zone is filled up, start writing into the second one.
Then, when both zones are filled up and before starting to write to the
first zone again, it reset the first zone.
We can determine the position of the latest superblock by reading write
pointer information from a device. One corner case is when both zones
are full. For this situation, we read out the last superblock of each
zone, and compare them to determine which zone is older.
The following zones are reserved as the circular buffer on ZONED btrfs.
- The primary superblock: zones 0 and 1
- The first copy: zones 16 and 17
- The second copy: zones 1024 or zone at 256GB which is minimum, and
next to it
If these reserved zones are conventional, superblock is written fixed at
the start of the zone without logging.
Signed-off-by: Naohiro Aota <naohiro.aota@wdc.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
2020-11-10 19:26:14 +08:00
|
|
|
BTRFS_NR_SB_LOG_ZONES, copy_zone_info_cb,
|
|
|
|
zones);
|
|
|
|
if (ret < 0)
|
|
|
|
return ret;
|
|
|
|
if (ret != BTRFS_NR_SB_LOG_ZONES)
|
|
|
|
return -EIO;
|
|
|
|
|
|
|
|
return sb_log_location(bdev, zones, rw, bytenr_ret);
|
|
|
|
}
|
|
|
|
|
|
|
|
int btrfs_sb_log_location(struct btrfs_device *device, int mirror, int rw,
|
|
|
|
u64 *bytenr_ret)
|
|
|
|
{
|
|
|
|
struct btrfs_zoned_device_info *zinfo = device->zone_info;
|
|
|
|
u32 zone_num;
|
|
|
|
|
2021-02-04 18:21:43 +08:00
|
|
|
/*
|
|
|
|
* For a zoned filesystem on a non-zoned block device, use the same
|
|
|
|
* super block locations as regular filesystem. Doing so, the super
|
|
|
|
* block can always be retrieved and the zoned flag of the volume
|
|
|
|
* detected from the super block information.
|
|
|
|
*/
|
|
|
|
if (!bdev_is_zoned(device->bdev)) {
|
btrfs: implement log-structured superblock for ZONED mode
Superblock (and its copies) is the only data structure in btrfs which
has a fixed location on a device. Since we cannot overwrite in a
sequential write required zone, we cannot place superblock in the zone.
One easy solution is limiting superblock and copies to be placed only in
conventional zones. However, this method has two downsides: one is
reduced number of superblock copies. The location of the second copy of
superblock is 256GB, which is in a sequential write required zone on
typical devices in the market today. So, the number of superblock and
copies is limited to be two. Second downside is that we cannot support
devices which have no conventional zones at all.
To solve these two problems, we employ superblock log writing. It uses
two adjacent zones as a circular buffer to write updated superblocks.
Once the first zone is filled up, start writing into the second one.
Then, when both zones are filled up and before starting to write to the
first zone again, it reset the first zone.
We can determine the position of the latest superblock by reading write
pointer information from a device. One corner case is when both zones
are full. For this situation, we read out the last superblock of each
zone, and compare them to determine which zone is older.
The following zones are reserved as the circular buffer on ZONED btrfs.
- The primary superblock: zones 0 and 1
- The first copy: zones 16 and 17
- The second copy: zones 1024 or zone at 256GB which is minimum, and
next to it
If these reserved zones are conventional, superblock is written fixed at
the start of the zone without logging.
Signed-off-by: Naohiro Aota <naohiro.aota@wdc.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
2020-11-10 19:26:14 +08:00
|
|
|
*bytenr_ret = btrfs_sb_offset(mirror);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
zone_num = sb_zone_number(zinfo->zone_size_shift, mirror);
|
|
|
|
if (zone_num + 1 >= zinfo->nr_zones)
|
|
|
|
return -ENOENT;
|
|
|
|
|
|
|
|
return sb_log_location(device->bdev,
|
|
|
|
&zinfo->sb_zones[BTRFS_NR_SB_LOG_ZONES * mirror],
|
|
|
|
rw, bytenr_ret);
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline bool is_sb_log_zone(struct btrfs_zoned_device_info *zinfo,
|
|
|
|
int mirror)
|
|
|
|
{
|
|
|
|
u32 zone_num;
|
|
|
|
|
|
|
|
if (!zinfo)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
zone_num = sb_zone_number(zinfo->zone_size_shift, mirror);
|
|
|
|
if (zone_num + 1 >= zinfo->nr_zones)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
if (!test_bit(zone_num, zinfo->seq_zones))
|
|
|
|
return false;
|
|
|
|
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2021-08-19 20:19:14 +08:00
|
|
|
/*
 * Advance the cached write pointer of the superblock log zones of @mirror
 * on @device after a superblock has been written.
 *
 * @device:	device the superblock was just written to
 * @mirror:	superblock mirror number
 *
 * Returns 0 on success (including when this mirror does not use log zones
 * on the device), a negative errno if explicitly finishing a filled zone
 * fails, or -EIO if both cached zones were already FULL (should not happen,
 * asserted below).
 */
int btrfs_advance_sb_log(struct btrfs_device *device, int mirror)
{
	struct btrfs_zoned_device_info *zinfo = device->zone_info;
	struct blk_zone *zone;
	int i;

	/* Conventional-zone or missing-pair mirrors have no log to advance. */
	if (!is_sb_log_zone(zinfo, mirror))
		return 0;

	/* First zone of the two-zone circular log for this mirror. */
	zone = &zinfo->sb_zones[BTRFS_NR_SB_LOG_ZONES * mirror];
	for (i = 0; i < BTRFS_NR_SB_LOG_ZONES; i++) {
		/* Advance the next zone */
		if (zone->cond == BLK_ZONE_COND_FULL) {
			zone++;
			continue;
		}

		/* Writing into an empty zone implicitly opens it. */
		if (zone->cond == BLK_ZONE_COND_EMPTY)
			zone->cond = BLK_ZONE_COND_IMP_OPEN;

		/* Account for the superblock just written. */
		zone->wp += SUPER_INFO_SECTORS;

		if (sb_zone_is_full(zone)) {
			/*
			 * No room left to write new superblock. Since
			 * superblock is written with REQ_SYNC, it is safe to
			 * finish the zone now.
			 *
			 * If the write pointer is exactly at the capacity,
			 * explicit ZONE_FINISH is not necessary.
			 */
			if (zone->wp != zone->start + zone->capacity) {
				int ret;

				ret = blkdev_zone_mgmt(device->bdev,
						REQ_OP_ZONE_FINISH, zone->start,
						zone->len, GFP_NOFS);
				if (ret)
					return ret;
			}

			/* Reflect the FULL state in the cached zone info. */
			zone->wp = zone->start + zone->len;
			zone->cond = BLK_ZONE_COND_FULL;
		}

		return 0;
	}

	/* All the zones are FULL. Should not reach here. */
	ASSERT(0);
	return -EIO;
}
|
|
|
|
|
|
|
|
int btrfs_reset_sb_log_zones(struct block_device *bdev, int mirror)
|
|
|
|
{
|
|
|
|
sector_t zone_sectors;
|
|
|
|
sector_t nr_sectors;
|
|
|
|
u8 zone_sectors_shift;
|
|
|
|
u32 sb_zone;
|
|
|
|
u32 nr_zones;
|
|
|
|
|
|
|
|
zone_sectors = bdev_zone_sectors(bdev);
|
|
|
|
zone_sectors_shift = ilog2(zone_sectors);
|
2020-12-17 04:57:51 +08:00
|
|
|
nr_sectors = bdev_nr_sectors(bdev);
|
btrfs: implement log-structured superblock for ZONED mode
Superblock (and its copies) is the only data structure in btrfs which
has a fixed location on a device. Since we cannot overwrite in a
sequential write required zone, we cannot place superblock in the zone.
One easy solution is limiting superblock and copies to be placed only in
conventional zones. However, this method has two downsides: one is
reduced number of superblock copies. The location of the second copy of
superblock is 256GB, which is in a sequential write required zone on
typical devices in the market today. So, the number of superblock and
copies is limited to be two. Second downside is that we cannot support
devices which have no conventional zones at all.
To solve these two problems, we employ superblock log writing. It uses
two adjacent zones as a circular buffer to write updated superblocks.
Once the first zone is filled up, start writing into the second one.
Then, when both zones are filled up and before starting to write to the
first zone again, it reset the first zone.
We can determine the position of the latest superblock by reading write
pointer information from a device. One corner case is when both zones
are full. For this situation, we read out the last superblock of each
zone, and compare them to determine which zone is older.
The following zones are reserved as the circular buffer on ZONED btrfs.
- The primary superblock: zones 0 and 1
- The first copy: zones 16 and 17
- The second copy: zones 1024 or zone at 256GB which is minimum, and
next to it
If these reserved zones are conventional, superblock is written fixed at
the start of the zone without logging.
Signed-off-by: Naohiro Aota <naohiro.aota@wdc.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
2020-11-10 19:26:14 +08:00
|
|
|
nr_zones = nr_sectors >> zone_sectors_shift;
|
|
|
|
|
|
|
|
sb_zone = sb_zone_number(zone_sectors_shift + SECTOR_SHIFT, mirror);
|
|
|
|
if (sb_zone + 1 >= nr_zones)
|
|
|
|
return -ENOENT;
|
|
|
|
|
|
|
|
return blkdev_zone_mgmt(bdev, REQ_OP_ZONE_RESET,
|
2021-05-27 14:27:32 +08:00
|
|
|
zone_start_sector(sb_zone, bdev),
|
btrfs: implement log-structured superblock for ZONED mode
Superblock (and its copies) is the only data structure in btrfs which
has a fixed location on a device. Since we cannot overwrite in a
sequential write required zone, we cannot place superblock in the zone.
One easy solution is limiting superblock and copies to be placed only in
conventional zones. However, this method has two downsides: one is
reduced number of superblock copies. The location of the second copy of
superblock is 256GB, which is in a sequential write required zone on
typical devices in the market today. So, the number of superblock and
copies is limited to be two. Second downside is that we cannot support
devices which have no conventional zones at all.
To solve these two problems, we employ superblock log writing. It uses
two adjacent zones as a circular buffer to write updated superblocks.
Once the first zone is filled up, start writing into the second one.
Then, when both zones are filled up and before starting to write to the
first zone again, it reset the first zone.
We can determine the position of the latest superblock by reading write
pointer information from a device. One corner case is when both zones
are full. For this situation, we read out the last superblock of each
zone, and compare them to determine which zone is older.
The following zones are reserved as the circular buffer on ZONED btrfs.
- The primary superblock: zones 0 and 1
- The first copy: zones 16 and 17
- The second copy: zones 1024 or zone at 256GB which is minimum, and
next to it
If these reserved zones are conventional, superblock is written fixed at
the start of the zone without logging.
Signed-off-by: Naohiro Aota <naohiro.aota@wdc.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
2020-11-10 19:26:14 +08:00
|
|
|
zone_sectors * BTRFS_NR_SB_LOG_ZONES, GFP_NOFS);
|
|
|
|
}
|
2021-02-04 18:21:48 +08:00
|
|
|
|
2022-10-27 20:21:42 +08:00
|
|
|
/*
|
|
|
|
* Find allocatable zones within a given region.
|
2021-02-04 18:21:48 +08:00
|
|
|
*
|
|
|
|
* @device: the device to allocate a region on
|
|
|
|
 * @hole_start: the position of the hole in which to allocate the region
|
|
|
|
* @num_bytes: size of wanted region
|
|
|
|
* @hole_end: the end of the hole
|
|
|
|
* @return: position of allocatable zones
|
|
|
|
*
|
|
|
|
* Allocatable region should not contain any superblock locations.
|
|
|
|
*/
|
|
|
|
u64 btrfs_find_allocatable_zones(struct btrfs_device *device, u64 hole_start,
				 u64 hole_end, u64 num_bytes)
{
	struct btrfs_zoned_device_info *zinfo = device->zone_info;
	const u8 shift = zinfo->zone_size_shift;
	/* Number of zones needed to hold num_bytes. */
	u64 nzones = num_bytes >> shift;
	u64 pos = hole_start;
	u64 begin, end;
	bool have_sb;
	int i;

	ASSERT(IS_ALIGNED(hole_start, zinfo->zone_size));
	ASSERT(IS_ALIGNED(num_bytes, zinfo->zone_size));

	/*
	 * Scan the hole zone by zone: each iteration either accepts the
	 * current position, or advances pos past an obstacle (a non-empty
	 * sequential zone or a superblock location) and rescans.
	 */
	while (pos < hole_end) {
		begin = pos >> shift;
		end = begin + nzones;

		/* The candidate region would run past the device. */
		if (end > zinfo->nr_zones)
			return hole_end;

		/* Check if zones in the region are all empty */
		if (btrfs_dev_is_sequential(device, pos) &&
		    find_next_zero_bit(zinfo->empty_zones, end, begin) != end) {
			pos += zinfo->zone_size;
			continue;
		}

		have_sb = false;
		for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
			u32 sb_zone;
			u64 sb_pos;

			/* Overlap with this mirror's superblock log zone pair? */
			sb_zone = sb_zone_number(shift, i);
			if (!(end <= sb_zone ||
			      sb_zone + BTRFS_NR_SB_LOG_ZONES <= begin)) {
				have_sb = true;
				/* Skip just past the log zone pair and rescan. */
				pos = zone_start_physical(
					sb_zone + BTRFS_NR_SB_LOG_ZONES, zinfo);
				break;
			}

			/* We also need to exclude regular superblock positions */
			sb_pos = btrfs_sb_offset(i);
			if (!(pos + num_bytes <= sb_pos ||
			      sb_pos + BTRFS_SUPER_INFO_SIZE <= pos)) {
				have_sb = true;
				/* Skip past the superblock, zone-aligned, and rescan. */
				pos = ALIGN(sb_pos + BTRFS_SUPER_INFO_SIZE,
					    zinfo->zone_size);
				break;
			}
		}
		/* No superblock collision: pos is a valid allocatable position. */
		if (!have_sb)
			break;
	}

	return pos;
}
|
|
|
|
|
2021-08-19 20:19:17 +08:00
|
|
|
/*
 * Try to account the zone containing @pos as active on @device.
 *
 * Returns true if the zone is accounted active (or the device does not
 * limit the number of active zones), false when no active-zone budget is
 * left.  Lock-free: budget is an atomic counter paired with a per-zone bit.
 */
static bool btrfs_dev_set_active_zone(struct btrfs_device *device, u64 pos)
{
	struct btrfs_zoned_device_info *zone_info = device->zone_info;
	unsigned int zno = (pos >> zone_info->zone_size_shift);

	/* We can use any number of zones */
	if (zone_info->max_active_zones == 0)
		return true;

	if (!test_bit(zno, zone_info->active_zones)) {
		/* Active zone left? */
		if (atomic_dec_if_positive(&zone_info->active_zones_left) < 0)
			return false;
		if (test_and_set_bit(zno, zone_info->active_zones)) {
			/*
			 * Someone already set the bit: a concurrent caller
			 * activated this zone first, so return the budget we
			 * reserved above.
			 */
			atomic_inc(&zone_info->active_zones_left);
		}
	}

	return true;
}
|
|
|
|
|
|
|
|
/*
 * Mark the zone containing @pos as inactive, returning its slot to the
 * device's active-zone budget. No-op when the device does not limit the
 * number of active zones.
 */
static void btrfs_dev_clear_active_zone(struct btrfs_device *device, u64 pos)
{
	struct btrfs_zoned_device_info *zone_info = device->zone_info;
	unsigned int zno = (pos >> zone_info->zone_size_shift);

	/* We can use any number of zones */
	if (zone_info->max_active_zones == 0)
		return;

	/* Only the thread that actually clears the bit releases the slot. */
	if (test_and_clear_bit(zno, zone_info->active_zones))
		atomic_inc(&zone_info->active_zones_left);
}
|
|
|
|
|
2021-02-04 18:21:48 +08:00
|
|
|
/*
 * Reset the device zones covering [@physical, @physical + @length) and
 * update the in-memory zone state accordingly.
 *
 * @device:   the zoned device to operate on
 * @physical: start of the region, must be zone aligned
 * @length:   size of the region, must be a multiple of the zone size
 * @bytes:    output, number of bytes actually reset (0 on failure)
 *
 * Returns 0 on success or the error from blkdev_zone_mgmt().
 */
int btrfs_reset_device_zone(struct btrfs_device *device, u64 physical,
			    u64 length, u64 *bytes)
{
	int ret;

	*bytes = 0;
	ret = blkdev_zone_mgmt(device->bdev, REQ_OP_ZONE_RESET,
			       physical >> SECTOR_SHIFT, length >> SECTOR_SHIFT,
			       GFP_NOFS);
	if (ret)
		return ret;

	*bytes = length;
	while (length) {
		/* A reset zone is empty and no longer counts as active. */
		btrfs_dev_set_zone_empty(device, physical);
		btrfs_dev_clear_active_zone(device, physical);
		physical += device->zone_info->zone_size;
		length -= device->zone_info->zone_size;
	}

	return 0;
}
|
|
|
|
|
|
|
|
/*
 * Ensure that all sequential zones in [@start, @start + @size) are empty,
 * resetting any that are not. Conventional zones are left untouched.
 *
 * A non-empty sequential zone in a supposedly free region is unexpected,
 * so a warning is emitted before resetting it.
 *
 * Returns 0 on success, -ERANGE if the region exceeds the device, or an
 * error from btrfs_reset_device_zone().
 */
int btrfs_ensure_empty_zones(struct btrfs_device *device, u64 start, u64 size)
{
	struct btrfs_zoned_device_info *zinfo = device->zone_info;
	const u8 shift = zinfo->zone_size_shift;
	unsigned long begin = start >> shift;
	unsigned long end = (start + size) >> shift;
	u64 pos;
	int ret;

	ASSERT(IS_ALIGNED(start, zinfo->zone_size));
	ASSERT(IS_ALIGNED(size, zinfo->zone_size));

	if (end > zinfo->nr_zones)
		return -ERANGE;

	/* All the zones are conventional */
	if (find_next_bit(zinfo->seq_zones, end, begin) == end)
		return 0;

	/* All the zones are sequential and empty */
	if (find_next_zero_bit(zinfo->seq_zones, end, begin) == end &&
	    find_next_zero_bit(zinfo->empty_zones, end, begin) == end)
		return 0;

	for (pos = start; pos < start + size; pos += zinfo->zone_size) {
		u64 reset_bytes;

		if (!btrfs_dev_is_sequential(device, pos) ||
		    btrfs_dev_is_empty_zone(device, pos))
			continue;

		/* Free regions should be empty */
		btrfs_warn_in_rcu(
			device->fs_info,
		"zoned: resetting device %s (devid %llu) zone %llu for allocation",
			rcu_str_deref(device->name), device->devid, pos >> shift);
		WARN_ON_ONCE(1);

		ret = btrfs_reset_device_zone(device, pos, zinfo->zone_size,
					      &reset_bytes);
		if (ret)
			return ret;
	}

	return 0;
}
|
2021-02-04 18:21:50 +08:00
|
|
|
|
2021-02-04 18:21:51 +08:00
|
|
|
/*
 * Calculate an allocation pointer from the extent allocation information
 * for a block group consisting of conventional zones. It points to the
 * end of the highest addressed extent in the block group as an allocation
 * offset.
 *
 * @cache:      the block group to scan
 * @offset_ret: output, allocation offset relative to the block group start
 * @new:        true if the block group was just created (no extents yet)
 */
static int calculate_alloc_pointer(struct btrfs_block_group *cache,
				   u64 *offset_ret, bool new)
{
	struct btrfs_fs_info *fs_info = cache->fs_info;
	struct btrfs_root *root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_key found_key;
	int ret;
	u64 length;

	/*
	 * Avoid tree lookups for a new block group, there's no use for it.
	 * It must always be 0.
	 *
	 * Also, we have a lock chain of extent buffer lock -> chunk mutex.
	 * For new a block group, this function is called from
	 * btrfs_make_block_group() which is already taking the chunk mutex.
	 * Thus, we cannot call calculate_alloc_pointer() which takes extent
	 * buffer locks to avoid deadlock.
	 */
	if (new) {
		*offset_ret = 0;
		return 0;
	}

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* Search from one past the block group end, then walk backwards. */
	key.objectid = cache->start + cache->length;
	key.type = 0;
	key.offset = 0;

	root = btrfs_extent_root(fs_info, key.objectid);
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	/* We should not find the exact match */
	if (!ret)
		ret = -EUCLEAN;
	if (ret < 0)
		goto out;

	ret = btrfs_previous_extent_item(root, path, cache->start);
	if (ret) {
		if (ret == 1) {
			/* No extent in this block group: offset is 0. */
			ret = 0;
			*offset_ret = 0;
		}
		goto out;
	}

	btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]);

	/* METADATA_ITEMs store the level in offset; use the node size. */
	if (found_key.type == BTRFS_EXTENT_ITEM_KEY)
		length = found_key.offset;
	else
		length = fs_info->nodesize;

	/* The extent must lie entirely inside the block group. */
	if (!(found_key.objectid >= cache->start &&
	      found_key.objectid + length <= cache->start + cache->length)) {
		ret = -EUCLEAN;
		goto out;
	}
	*offset_ret = found_key.objectid + length - cache->start;
	ret = 0;

out:
	btrfs_free_path(path);
	return ret;
}
|
|
|
|
|
|
|
|
/*
 * Load zone related information (write pointer, zone capacity, activeness)
 * for a block group on a zoned filesystem.
 *
 * @cache: the block group to load the info for
 * @new:   true if the block group was just created
 *
 * For each stripe of the chunk, query the device zone to recover the write
 * pointer, then combine the per-stripe results according to the RAID
 * profile. Conventional zones have their allocation pointer emulated from
 * the extent tree via calculate_alloc_pointer().
 *
 * Returns 0 on success or a negative errno.
 */
int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
{
	struct btrfs_fs_info *fs_info = cache->fs_info;
	struct extent_map_tree *em_tree = &fs_info->mapping_tree;
	struct extent_map *em;
	struct map_lookup *map;
	struct btrfs_device *device;
	u64 logical = cache->start;
	u64 length = cache->length;
	int ret;
	int i;
	unsigned int nofs_flag;
	u64 *alloc_offsets = NULL;	/* per-stripe write pointer offset */
	u64 *caps = NULL;		/* per-stripe zone capacity (bytes) */
	u64 *physical = NULL;		/* per-stripe physical start */
	unsigned long *active = NULL;	/* per-stripe "zone is active" bitmap */
	u64 last_alloc = 0;
	u32 num_sequential = 0, num_conventional = 0;

	if (!btrfs_is_zoned(fs_info))
		return 0;

	/* Sanity check */
	if (!IS_ALIGNED(length, fs_info->zone_size)) {
		btrfs_err(fs_info,
		"zoned: block group %llu len %llu unaligned to zone size %llu",
			  logical, length, fs_info->zone_size);
		return -EIO;
	}

	/* Get the chunk mapping */
	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, logical, length);
	read_unlock(&em_tree->lock);

	if (!em)
		return -EINVAL;

	map = em->map_lookup;

	cache->physical_map = kmemdup(map, map_lookup_size(map->num_stripes), GFP_NOFS);
	if (!cache->physical_map) {
		ret = -ENOMEM;
		goto out;
	}

	alloc_offsets = kcalloc(map->num_stripes, sizeof(*alloc_offsets), GFP_NOFS);
	if (!alloc_offsets) {
		ret = -ENOMEM;
		goto out;
	}

	caps = kcalloc(map->num_stripes, sizeof(*caps), GFP_NOFS);
	if (!caps) {
		ret = -ENOMEM;
		goto out;
	}

	physical = kcalloc(map->num_stripes, sizeof(*physical), GFP_NOFS);
	if (!physical) {
		ret = -ENOMEM;
		goto out;
	}

	active = bitmap_zalloc(map->num_stripes, GFP_NOFS);
	if (!active) {
		ret = -ENOMEM;
		goto out;
	}

	for (i = 0; i < map->num_stripes; i++) {
		bool is_sequential;
		struct blk_zone zone;
		struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
		int dev_replace_is_ongoing = 0;

		device = map->stripes[i].dev;
		physical[i] = map->stripes[i].physical;

		if (device->bdev == NULL) {
			alloc_offsets[i] = WP_MISSING_DEV;
			continue;
		}

		is_sequential = btrfs_dev_is_sequential(device, physical[i]);
		if (is_sequential)
			num_sequential++;
		else
			num_conventional++;

		/*
		 * Consider a zone as active if we can allow any number of
		 * active zones.
		 */
		if (!device->zone_info->max_active_zones)
			__set_bit(i, active);

		if (!is_sequential) {
			alloc_offsets[i] = WP_CONVENTIONAL;
			continue;
		}

		/*
		 * This zone will be used for allocation, so mark this zone
		 * non-empty.
		 */
		btrfs_dev_clear_zone_empty(device, physical[i]);

		down_read(&dev_replace->rwsem);
		dev_replace_is_ongoing = btrfs_dev_replace_is_ongoing(dev_replace);
		if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL)
			btrfs_dev_clear_zone_empty(dev_replace->tgtdev, physical[i]);
		up_read(&dev_replace->rwsem);

		/*
		 * The group is mapped to a sequential zone. Get the zone write
		 * pointer to determine the allocation offset within the zone.
		 */
		WARN_ON(!IS_ALIGNED(physical[i], fs_info->zone_size));
		nofs_flag = memalloc_nofs_save();
		ret = btrfs_get_dev_zone(device, physical[i], &zone);
		memalloc_nofs_restore(nofs_flag);
		if (ret == -EIO || ret == -EOPNOTSUPP) {
			/* Treat an unreadable zone like a missing device. */
			ret = 0;
			alloc_offsets[i] = WP_MISSING_DEV;
			continue;
		} else if (ret) {
			goto out;
		}

		if (zone.type == BLK_ZONE_TYPE_CONVENTIONAL) {
			btrfs_err_in_rcu(fs_info,
	"zoned: unexpected conventional zone %llu on device %s (devid %llu)",
				zone.start << SECTOR_SHIFT,
				rcu_str_deref(device->name), device->devid);
			ret = -EIO;
			goto out;
		}

		caps[i] = (zone.capacity << SECTOR_SHIFT);

		switch (zone.cond) {
		case BLK_ZONE_COND_OFFLINE:
		case BLK_ZONE_COND_READONLY:
			btrfs_err(fs_info,
		"zoned: offline/readonly zone %llu on device %s (devid %llu)",
				  physical[i] >> device->zone_info->zone_size_shift,
				  rcu_str_deref(device->name), device->devid);
			alloc_offsets[i] = WP_MISSING_DEV;
			break;
		case BLK_ZONE_COND_EMPTY:
			alloc_offsets[i] = 0;
			break;
		case BLK_ZONE_COND_FULL:
			alloc_offsets[i] = caps[i];
			break;
		default:
			/* Partially used zone */
			alloc_offsets[i] =
					((zone.wp - zone.start) << SECTOR_SHIFT);
			__set_bit(i, active);
			break;
		}
	}

	if (num_sequential > 0)
		set_bit(BLOCK_GROUP_FLAG_SEQUENTIAL_ZONE, &cache->runtime_flags);

	if (num_conventional > 0) {
		/* Zone capacity is always zone size in emulation */
		cache->zone_capacity = cache->length;
		ret = calculate_alloc_pointer(cache, &last_alloc, new);
		if (ret) {
			btrfs_err(fs_info,
			"zoned: failed to determine allocation offset of bg %llu",
				  cache->start);
			goto out;
		} else if (map->num_stripes == num_conventional) {
			cache->alloc_offset = last_alloc;
			set_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &cache->runtime_flags);
			goto out;
		}
	}

	switch (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
	case 0: /* single */
		if (alloc_offsets[0] == WP_MISSING_DEV) {
			btrfs_err(fs_info,
			"zoned: cannot recover write pointer for zone %llu",
				  physical[0]);
			ret = -EIO;
			goto out;
		}
		cache->alloc_offset = alloc_offsets[0];
		cache->zone_capacity = caps[0];
		if (test_bit(0, active))
			set_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &cache->runtime_flags);
		break;
	case BTRFS_BLOCK_GROUP_DUP:
		if (map->type & BTRFS_BLOCK_GROUP_DATA) {
			btrfs_err(fs_info, "zoned: profile DUP not yet supported on data bg");
			ret = -EINVAL;
			goto out;
		}
		if (alloc_offsets[0] == WP_MISSING_DEV) {
			btrfs_err(fs_info,
			"zoned: cannot recover write pointer for zone %llu",
				  physical[0]);
			ret = -EIO;
			goto out;
		}
		if (alloc_offsets[1] == WP_MISSING_DEV) {
			btrfs_err(fs_info,
			"zoned: cannot recover write pointer for zone %llu",
				  physical[1]);
			ret = -EIO;
			goto out;
		}
		/* Both stripes must agree on the write pointer. */
		if (alloc_offsets[0] != alloc_offsets[1]) {
			btrfs_err(fs_info,
			"zoned: write pointer offset mismatch of zones in DUP profile");
			ret = -EIO;
			goto out;
		}
		if (test_bit(0, active) != test_bit(1, active)) {
			/* One stripe active, one not: try activating both. */
			if (!btrfs_zone_activate(cache)) {
				ret = -EIO;
				goto out;
			}
		} else {
			if (test_bit(0, active))
				set_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE,
					&cache->runtime_flags);
		}
		cache->alloc_offset = alloc_offsets[0];
		cache->zone_capacity = min(caps[0], caps[1]);
		break;
	case BTRFS_BLOCK_GROUP_RAID1:
	case BTRFS_BLOCK_GROUP_RAID0:
	case BTRFS_BLOCK_GROUP_RAID10:
	case BTRFS_BLOCK_GROUP_RAID5:
	case BTRFS_BLOCK_GROUP_RAID6:
		/* non-single profiles are not supported yet */
	default:
		btrfs_err(fs_info, "zoned: profile %s not yet supported",
			  btrfs_bg_type_to_raid_name(map->type));
		ret = -EINVAL;
		goto out;
	}

out:
	if (cache->alloc_offset > fs_info->zone_size) {
		btrfs_err(fs_info,
			"zoned: invalid write pointer %llu in block group %llu",
			cache->alloc_offset, cache->start);
		ret = -EIO;
	}

	if (cache->alloc_offset > cache->zone_capacity) {
		btrfs_err(fs_info,
"zoned: invalid write pointer %llu (larger than zone capacity %llu) in block group %llu",
			  cache->alloc_offset, cache->zone_capacity,
			  cache->start);
		ret = -EIO;
	}

	/* An extent is allocated after the write pointer */
	if (!ret && num_conventional && last_alloc > cache->alloc_offset) {
		btrfs_err(fs_info,
			  "zoned: got wrong write pointer in BG %llu: %llu > %llu",
			  logical, last_alloc, cache->alloc_offset);
		ret = -EIO;
	}

	if (!ret) {
		cache->meta_write_pointer = cache->alloc_offset + cache->start;
		if (test_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &cache->runtime_flags)) {
			/* Track the block group on the fs-wide active list. */
			btrfs_get_block_group(cache);
			spin_lock(&fs_info->zone_active_bgs_lock);
			list_add_tail(&cache->active_bg_list,
				      &fs_info->zone_active_bgs);
			spin_unlock(&fs_info->zone_active_bgs_lock);
		}
	} else {
		kfree(cache->physical_map);
		cache->physical_map = NULL;
	}
	bitmap_free(active);
	kfree(physical);
	kfree(caps);
	kfree(alloc_offsets);
	free_extent_map(em);

	return ret;
}
|
2021-02-04 18:21:52 +08:00
|
|
|
|
|
|
|
/*
 * Initialize free-space accounting for a zoned block group.
 *
 * On zoned filesystems only the region between the write pointer
 * (alloc_offset) and the zone capacity is allocatable; everything before
 * the write pointer that is not "used", and the slack past the zone
 * capacity, is zone_unusable. A metadata/system block group that was
 * never activated has its whole length counted as unusable until
 * activation.
 */
void btrfs_calc_zone_unusable(struct btrfs_block_group *cache)
{
	u64 unusable, free;

	if (!btrfs_is_zoned(cache->fs_info))
		return;

	WARN_ON(cache->bytes_super != 0);

	/* Check for block groups never get activated */
	if (test_bit(BTRFS_FS_ACTIVE_ZONE_TRACKING, &cache->fs_info->flags) &&
	    cache->flags & (BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_SYSTEM) &&
	    !test_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &cache->runtime_flags) &&
	    cache->alloc_offset == 0) {
		unusable = cache->length;
		free = 0;
	} else {
		unusable = (cache->alloc_offset - cache->used) +
			   (cache->length - cache->zone_capacity);
		free = cache->zone_capacity - cache->alloc_offset;
	}

	/* We only need ->free_space in ALLOC_SEQ block groups */
	cache->cached = BTRFS_CACHE_FINISHED;
	cache->free_space_ctl->free_space = free;
	cache->zone_unusable = unusable;
}
|
2021-02-04 18:21:54 +08:00
|
|
|
|
|
|
|
/*
 * Queue an extent buffer to be redirtied and zeroed out instead of being
 * written, so the transaction's dirty range stays consistent on a zoned
 * filesystem.
 *
 * Buffers already written or already queued are skipped. The extra
 * reference taken here is dropped by btrfs_free_redirty_list().
 */
void btrfs_redirty_list_add(struct btrfs_transaction *trans,
			    struct extent_buffer *eb)
{
	struct btrfs_fs_info *fs_info = eb->fs_info;

	if (!btrfs_is_zoned(fs_info) ||
	    btrfs_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN) ||
	    !list_empty(&eb->release_list))
		return;

	set_extent_buffer_dirty(eb);
	set_extent_bits_nowait(&trans->dirty_pages, eb->start,
			       eb->start + eb->len - 1, EXTENT_DIRTY);
	/* Zero the buffer and flag it so checksumming is skipped. */
	memzero_extent_buffer(eb, 0, eb->len);
	set_bit(EXTENT_BUFFER_NO_CHECK, &eb->bflags);

	spin_lock(&trans->releasing_ebs_lock);
	list_add_tail(&eb->release_list, &trans->releasing_ebs);
	spin_unlock(&trans->releasing_ebs_lock);
	atomic_inc(&eb->refs);
}
|
|
|
|
|
|
|
|
/*
 * Drain the transaction's list of redirtied extent buffers, dropping the
 * reference taken by btrfs_redirty_list_add() for each of them.
 */
void btrfs_free_redirty_list(struct btrfs_transaction *trans)
{
	spin_lock(&trans->releasing_ebs_lock);
	while (!list_empty(&trans->releasing_ebs)) {
		struct extent_buffer *eb;

		eb = list_first_entry(&trans->releasing_ebs,
				      struct extent_buffer, release_list);
		list_del_init(&eb->release_list);
		free_extent_buffer(eb);
	}
	spin_unlock(&trans->releasing_ebs_lock);
}
|
2021-02-04 18:22:03 +08:00
|
|
|
|
2022-12-12 15:37:21 +08:00
|
|
|
/*
 * Decide whether a bio should be submitted with REQ_OP_ZONE_APPEND.
 *
 * Only data writes to block groups backed by sequential zones use zone
 * append; metadata, reads, relocation writes and non-zoned filesystems
 * use regular writes.
 */
bool btrfs_use_zone_append(struct btrfs_bio *bbio)
{
	u64 start = (bbio->bio.bi_iter.bi_sector << SECTOR_SHIFT);
	struct btrfs_inode *inode = bbio->inode;
	struct btrfs_fs_info *fs_info = bbio->fs_info;
	struct btrfs_block_group *cache;
	bool ret = false;

	if (!btrfs_is_zoned(fs_info))
		return false;

	if (!inode || !is_data_inode(&inode->vfs_inode))
		return false;

	if (btrfs_op(&bbio->bio) != BTRFS_MAP_WRITE)
		return false;

	/*
	 * Using REQ_OP_ZONE_APPEND for relocation can break assumptions on the
	 * extent layout the relocation code has.
	 * Furthermore we have set aside own block-group from which only the
	 * relocation "process" can allocate and make sure only one process at a
	 * time can add pages to an extent that gets relocated, so it's safe to
	 * use regular REQ_OP_WRITE for this special case.
	 */
	if (btrfs_is_data_reloc_root(inode->root))
		return false;

	cache = btrfs_lookup_block_group(fs_info, start);
	ASSERT(cache);
	if (!cache)
		return false;

	ret = !!test_bit(BLOCK_GROUP_FLAG_SEQUENTIAL_ZONE, &cache->runtime_flags);
	btrfs_put_block_group(cache);

	return ret;
}
|
2021-02-04 18:22:05 +08:00
|
|
|
|
2023-01-21 14:50:18 +08:00
|
|
|
/*
 * Record the physical address a zone-append write landed at into the
 * corresponding ordered extent, so the logical address can be rewritten
 * later by btrfs_rewrite_logical_zoned().
 */
void btrfs_record_physical_zoned(struct btrfs_bio *bbio)
{
	const u64 physical = bbio->bio.bi_iter.bi_sector << SECTOR_SHIFT;
	struct btrfs_ordered_extent *ordered;

	ordered = btrfs_lookup_ordered_extent(bbio->inode, bbio->file_offset);
	if (WARN_ON(!ordered))
		return;

	ordered->physical = physical;
	btrfs_put_ordered_extent(ordered);
}
|
|
|
|
|
|
|
|
/*
 * Rewrite the logical address of an ordered extent after a zone-append
 * write, since the device chose the actual write location.
 *
 * The new logical address is derived from the recorded physical address
 * and the (single-stripe, non-RAID) chunk mapping. The cached extent map
 * and all checksum entries are adjusted to match.
 */
void btrfs_rewrite_logical_zoned(struct btrfs_ordered_extent *ordered)
{
	struct btrfs_inode *inode = BTRFS_I(ordered->inode);
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct extent_map_tree *em_tree;
	struct extent_map *em;
	struct btrfs_ordered_sum *sum;
	u64 orig_logical = ordered->disk_bytenr;
	struct map_lookup *map;
	u64 physical = ordered->physical;
	u64 chunk_start_phys;
	u64 logical;

	em = btrfs_get_chunk_map(fs_info, orig_logical, 1);
	if (IS_ERR(em))
		return;
	map = em->map_lookup;
	chunk_start_phys = map->stripes[0].physical;

	/* Zone append is only used on single, non-striped chunks. */
	if (WARN_ON_ONCE(map->num_stripes > 1) ||
	    WARN_ON_ONCE((map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) != 0) ||
	    WARN_ON_ONCE(physical < chunk_start_phys) ||
	    WARN_ON_ONCE(physical > chunk_start_phys + em->orig_block_len)) {
		free_extent_map(em);
		return;
	}
	logical = em->start + (physical - map->stripes[0].physical);
	free_extent_map(em);

	if (orig_logical == logical)
		return;

	ordered->disk_bytenr = logical;

	em_tree = &inode->extent_tree;
	write_lock(&em_tree->lock);
	em = search_extent_mapping(em_tree, ordered->file_offset,
				   ordered->num_bytes);
	/*
	 * NOTE(review): em is dereferenced without a NULL check here;
	 * presumably an extent map is guaranteed to exist for an in-flight
	 * ordered extent -- confirm against callers.
	 */
	em->block_start = logical;
	free_extent_map(em);
	write_unlock(&em_tree->lock);

	/* Shift every checksum entry by the same logical delta. */
	list_for_each_entry(sum, &ordered->list, list) {
		if (logical < orig_logical)
			sum->bytenr -= orig_logical - logical;
		else
			sum->bytenr += logical - orig_logical;
	}
}
|
2021-02-04 18:22:08 +08:00
|
|
|
|
|
|
|
/*
 * Check whether metadata extent buffer @eb sits exactly at its block
 * group's metadata write pointer, and advance the pointer past it if so.
 *
 * @cache_ret: output, the looked-up block group (with a reference held)
 *             on success, NULL when the write pointer does not match.
 *
 * Returns true if the buffer may be written now, false if the write must
 * wait (out-of-order buffer on a zoned filesystem).
 */
bool btrfs_check_meta_write_pointer(struct btrfs_fs_info *fs_info,
				    struct extent_buffer *eb,
				    struct btrfs_block_group **cache_ret)
{
	struct btrfs_block_group *cache;
	bool ret = true;

	if (!btrfs_is_zoned(fs_info))
		return true;

	cache = btrfs_lookup_block_group(fs_info, eb->start);
	if (!cache)
		return true;

	if (cache->meta_write_pointer != eb->start) {
		btrfs_put_block_group(cache);
		cache = NULL;
		ret = false;
	} else {
		cache->meta_write_pointer = eb->start + eb->len;
	}

	*cache_ret = cache;

	return ret;
}
|
|
|
|
|
|
|
|
void btrfs_revert_meta_write_pointer(struct btrfs_block_group *cache,
|
|
|
|
struct extent_buffer *eb)
|
|
|
|
{
|
|
|
|
if (!btrfs_is_zoned(eb->fs_info) || !cache)
|
|
|
|
return;
|
|
|
|
|
|
|
|
ASSERT(cache->meta_write_pointer == eb->start + eb->len);
|
|
|
|
cache->meta_write_pointer = eb->start;
|
|
|
|
}
|
2021-02-04 18:22:13 +08:00
|
|
|
|
|
|
|
/*
 * Zero out [@physical, @physical + @length) on @device.
 *
 * Only supported on sequential zones; returns -EOPNOTSUPP otherwise, or
 * the result of blkdev_issue_zeroout().
 */
int btrfs_zoned_issue_zeroout(struct btrfs_device *device, u64 physical, u64 length)
{
	const sector_t sector = physical >> SECTOR_SHIFT;
	const sector_t nr_sects = length >> SECTOR_SHIFT;

	if (!btrfs_dev_is_sequential(device, physical))
		return -EOPNOTSUPP;

	return blkdev_issue_zeroout(device->bdev, sector, nr_sects,
				    GFP_NOFS, 0);
}
|
2021-02-04 18:22:14 +08:00
|
|
|
|
|
|
|
/*
 * Read the zone information at @logical, trying each mirror in turn until
 * one device answers.
 *
 * @zone: output, filled with the zone report of the first working mirror.
 *
 * RAID56 chunks are rejected with -EINVAL. Returns 0 on success, -EIO if
 * the mapping fails, or the error of the last btrfs_get_dev_zone() call.
 */
static int read_zone_info(struct btrfs_fs_info *fs_info, u64 logical,
			  struct blk_zone *zone)
{
	struct btrfs_io_context *bioc = NULL;
	u64 mapped_length = PAGE_SIZE;
	unsigned int nofs_flag;
	int nmirrors;
	int i, ret;

	ret = btrfs_map_sblock(fs_info, BTRFS_MAP_GET_READ_MIRRORS, logical,
			       &mapped_length, &bioc);
	if (ret || !bioc || mapped_length < PAGE_SIZE) {
		ret = -EIO;
		goto out_put_bioc;
	}

	if (bioc->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
		ret = -EINVAL;
		goto out_put_bioc;
	}

	nofs_flag = memalloc_nofs_save();
	nmirrors = (int)bioc->num_stripes;
	for (i = 0; i < nmirrors; i++) {
		u64 physical = bioc->stripes[i].physical;
		struct btrfs_device *dev = bioc->stripes[i].dev;

		/* Missing device */
		if (!dev->bdev)
			continue;

		ret = btrfs_get_dev_zone(dev, physical, zone);
		/* Failing device */
		if (ret == -EIO || ret == -EOPNOTSUPP)
			continue;
		break;
	}
	memalloc_nofs_restore(nofs_flag);
out_put_bioc:
	btrfs_put_bioc(bioc);
	return ret;
}
|
|
|
|
|
|
|
|
/*
 * Synchronize write pointer in a zone at @physical_start on @tgt_dev, by
 * filling zeros between @physical_pos and the write pointer of the
 * dev-replace source device.
 *
 * Returns 0 on success (or when nothing needs doing), -EUCLEAN if the
 * target is already past the source write pointer, or an error from zone
 * lookup / zeroout.
 */
int btrfs_sync_zone_write_pointer(struct btrfs_device *tgt_dev, u64 logical,
				  u64 physical_start, u64 physical_pos)
{
	struct btrfs_fs_info *fs_info = tgt_dev->fs_info;
	struct blk_zone zone;
	u64 length;
	u64 wp;
	int ret;

	/* Conventional zones need no write-pointer fixup. */
	if (!btrfs_dev_is_sequential(tgt_dev, physical_pos))
		return 0;

	ret = read_zone_info(fs_info, logical, &zone);
	if (ret)
		return ret;

	/* Translate the source write pointer into the target zone. */
	wp = physical_start + ((zone.wp - zone.start) << SECTOR_SHIFT);

	if (physical_pos == wp)
		return 0;

	if (physical_pos > wp)
		return -EUCLEAN;

	length = wp - physical_pos;
	return btrfs_zoned_issue_zeroout(tgt_dev, physical_pos, length);
}
|
2021-05-18 23:40:29 +08:00
|
|
|
|
2022-10-27 20:21:42 +08:00
|
|
|
/*
 * Activate block group and underlying device zones
 *
 * @block_group: the block group to activate
 *
 * Return: true on success, false otherwise
 */
bool btrfs_zone_activate(struct btrfs_block_group *block_group)
{
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	struct btrfs_space_info *space_info = block_group->space_info;
	struct map_lookup *map;
	struct btrfs_device *device;
	u64 physical;
	bool ret;
	int i;

	/* Non-zoned filesystems need no zone activation; report success. */
	if (!btrfs_is_zoned(block_group->fs_info))
		return true;

	map = block_group->physical_map;

	/*
	 * Lock order: space_info->lock first, then block_group->lock.  Both
	 * are needed because activation moves bytes out of the space_info's
	 * zone_unusable accounting below.
	 */
	spin_lock(&space_info->lock);
	spin_lock(&block_group->lock);
	/* Already active: nothing to do. */
	if (test_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &block_group->runtime_flags)) {
		ret = true;
		goto out_unlock;
	}

	/* No space left */
	if (btrfs_zoned_bg_is_full(block_group)) {
		ret = false;
		goto out_unlock;
	}

	/* Activate the zone backing each stripe of this block group. */
	for (i = 0; i < map->num_stripes; i++) {
		device = map->stripes[i].dev;
		physical = map->stripes[i].physical;

		/* max_active_zones == 0 means the device has no limit. */
		if (device->zone_info->max_active_zones == 0)
			continue;

		if (!btrfs_dev_set_active_zone(device, physical)) {
			/* Cannot activate the zone */
			ret = false;
			goto out_unlock;
		}
	}

	/* Successfully activated all the zones */
	set_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &block_group->runtime_flags);
	WARN_ON(block_group->alloc_offset != 0);
	/*
	 * A fresh block group has its whole length counted as zone_unusable.
	 * Now that it is activated, release the usable [0, zone_capacity)
	 * region from the zone_unusable counters so it becomes allocatable.
	 */
	if (block_group->zone_unusable == block_group->length) {
		block_group->zone_unusable = block_group->length - block_group->zone_capacity;
		space_info->bytes_zone_unusable -= block_group->zone_capacity;
	}
	spin_unlock(&block_group->lock);
	/* Newly usable space may satisfy waiting reservation tickets. */
	btrfs_try_granting_tickets(fs_info, space_info);
	spin_unlock(&space_info->lock);

	/* For the active block group list */
	btrfs_get_block_group(block_group);

	spin_lock(&fs_info->zone_active_bgs_lock);
	list_add_tail(&block_group->active_bg_list, &fs_info->zone_active_bgs);
	spin_unlock(&fs_info->zone_active_bgs_lock);

	return true;

out_unlock:
	spin_unlock(&block_group->lock);
	spin_unlock(&space_info->lock);
	return ret;
}
|
|
|
|
|
2022-09-09 14:59:55 +08:00
|
|
|
/*
 * Wait for writeback of every extent buffer that lies inside @block_group.
 *
 * Walks fs_info->buffer_radix (keyed by eb->start >> sectorsize_bits)
 * starting at the block group's first possible key and stops once an eb
 * past the block group's end is seen.
 */
static void wait_eb_writebacks(struct btrfs_block_group *block_group)
{
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	const u64 end = block_group->start + block_group->length;
	struct radix_tree_iter iter;
	struct extent_buffer *eb;
	void __rcu **slot;

	rcu_read_lock();
	radix_tree_for_each_slot(slot, &fs_info->buffer_radix, &iter,
				 block_group->start >> fs_info->sectorsize_bits) {
		eb = radix_tree_deref_slot(slot);
		if (!eb)
			continue;
		/* Slot changed under us; restart the iteration at this index. */
		if (radix_tree_deref_retry(eb)) {
			slot = radix_tree_iter_retry(&iter);
			continue;
		}

		/* Skip ebs before the block group; stop once past its end. */
		if (eb->start < block_group->start)
			continue;
		if (eb->start >= end)
			break;

		/*
		 * Waiting for writeback can sleep, so drop the RCU read lock
		 * around the wait.  radix_tree_iter_resume() makes the
		 * iterator safe to continue from after re-locking.
		 */
		slot = radix_tree_iter_resume(slot, &iter);
		rcu_read_unlock();
		wait_on_extent_buffer_writeback(eb);
		rcu_read_lock();
	}
	rcu_read_unlock();
}
|
|
|
|
|
2022-05-04 08:48:51 +08:00
|
|
|
/*
 * Deactivate a block group and finish the zones backing it.
 *
 * @block_group:   the block group to finish
 * @fully_written: true when the caller knows no more writes can land in the
 *                 block group (it is full and the last IO completed), which
 *                 lets us skip the read-only flip and the write draining.
 *
 * Returns 0 on success (including when the group was already inactive),
 * -EAGAIN when unwritten allocated space or outstanding reservations remain,
 * or a negative errno from the zone management command.
 */
static int do_zone_finish(struct btrfs_block_group *block_group, bool fully_written)
{
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	struct map_lookup *map;
	const bool is_metadata = (block_group->flags &
			(BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_SYSTEM));
	int ret = 0;
	int i;

	spin_lock(&block_group->lock);
	/* Already inactive: nothing to finish. */
	if (!test_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &block_group->runtime_flags)) {
		spin_unlock(&block_group->lock);
		return 0;
	}

	/* Check if we have unwritten allocated space */
	if (is_metadata &&
	    block_group->start + block_group->alloc_offset > block_group->meta_write_pointer) {
		spin_unlock(&block_group->lock);
		return -EAGAIN;
	}

	/*
	 * If we are sure that the block group is full (= no more room left for
	 * new allocation) and the IO for the last usable block is completed, we
	 * don't need to wait for the other IOs. This holds because we ensure
	 * the sequential IO submissions using the ZONE_APPEND command for data
	 * and block_group->meta_write_pointer for metadata.
	 */
	if (!fully_written) {
		spin_unlock(&block_group->lock);

		/* Flip read-only so no new allocation races with finishing. */
		ret = btrfs_inc_block_group_ro(block_group, false);
		if (ret)
			return ret;

		/* Ensure all writes in this block group finish */
		btrfs_wait_block_group_reservations(block_group);
		/* No need to wait for NOCOW writers. Zoned mode does not allow that */
		btrfs_wait_ordered_roots(fs_info, U64_MAX, block_group->start,
					 block_group->length);
		/* Wait for extent buffers to be written. */
		if (is_metadata)
			wait_eb_writebacks(block_group);

		spin_lock(&block_group->lock);

		/*
		 * Bail out if someone already deactivated the block group, or
		 * allocated space is left in the block group.
		 */
		if (!test_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE,
			      &block_group->runtime_flags)) {
			spin_unlock(&block_group->lock);
			btrfs_dec_block_group_ro(block_group);
			return 0;
		}

		if (block_group->reserved) {
			spin_unlock(&block_group->lock);
			btrfs_dec_block_group_ro(block_group);
			return -EAGAIN;
		}
	}

	/* Mark inactive and exhaust the allocatable range. */
	clear_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &block_group->runtime_flags);
	block_group->alloc_offset = block_group->zone_capacity;
	block_group->free_space_ctl->free_space = 0;
	btrfs_clear_treelog_bg(block_group);
	btrfs_clear_data_reloc_bg(block_group);
	spin_unlock(&block_group->lock);

	map = block_group->physical_map;
	for (i = 0; i < map->num_stripes; i++) {
		struct btrfs_device *device = map->stripes[i].dev;
		const u64 physical = map->stripes[i].physical;

		/* max_active_zones == 0 means the device has no limit. */
		if (device->zone_info->max_active_zones == 0)
			continue;

		/*
		 * Always issue ZONE_FINISH, even if the zone looks fully
		 * written: a canceled last ordered extent can leave the zone
		 * active on the device while we believe it is full.
		 */
		ret = blkdev_zone_mgmt(device->bdev, REQ_OP_ZONE_FINISH,
				       physical >> SECTOR_SHIFT,
				       device->zone_info->zone_size >> SECTOR_SHIFT,
				       GFP_NOFS);

		if (ret)
			return ret;

		btrfs_dev_clear_active_zone(device, physical);
	}

	if (!fully_written)
		btrfs_dec_block_group_ro(block_group);

	spin_lock(&fs_info->zone_active_bgs_lock);
	ASSERT(!list_empty(&block_group->active_bg_list));
	list_del_init(&block_group->active_bg_list);
	spin_unlock(&fs_info->zone_active_bgs_lock);

	/* For active_bg_list */
	btrfs_put_block_group(block_group);

	/* A device zone was freed up; wake waiters needing a zone to finish. */
	clear_and_wake_up_bit(BTRFS_FS_NEED_ZONE_FINISH, &fs_info->flags);

	return 0;
}
|
2021-08-19 20:19:22 +08:00
|
|
|
|
2022-05-04 08:48:51 +08:00
|
|
|
int btrfs_zone_finish(struct btrfs_block_group *block_group)
|
|
|
|
{
|
|
|
|
if (!btrfs_is_zoned(block_group->fs_info))
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
return do_zone_finish(block_group, false);
|
|
|
|
}
|
|
|
|
|
btrfs: zoned: fix chunk allocation condition for zoned allocator
The ZNS specification defines a limit on the number of "active"
zones. That limit impose us to limit the number of block groups which
can be used for an allocation at the same time. Not to exceed the
limit, we reuse the existing active block groups as much as possible
when we can't activate any other zones without sacrificing an already
activated block group in commit a85f05e59bc1 ("btrfs: zoned: avoid
chunk allocation if active block group has enough space").
However, the check is wrong in two ways. First, it checks the
condition for every raid index (ffe_ctl->index). Even if it reaches
the condition and "ffe_ctl->max_extent_size >=
ffe_ctl->min_alloc_size" is met, there can be other block groups
having enough space to hold ffe_ctl->num_bytes. (Actually, this won't
happen in the current zoned code as it only supports SINGLE
profile. But, it can happen once it enables other RAID types.)
Second, it checks the active zone availability depending on the
raid index. The raid index is just an index for
space_info->block_groups, so it has nothing to do with chunk allocation.
These mistakes are causing a faulty allocation in a certain
situation. Consider we are running zoned btrfs on a device whose
max_active_zone == 0 (no limit). And, suppose no block group have a
room to fit ffe_ctl->num_bytes but some room to meet
ffe_ctl->min_alloc_size (i.e. max_extent_size > num_bytes >=
min_alloc_size).
In this situation, the following occur:
- With SINGLE raid_index, it reaches the chunk allocation checking
code
- The check returns true because we can activate a new zone (no limit)
- But, before allocating the chunk, it iterates to the next raid index
(RAID5)
- Since there are no RAID5 block groups on zoned mode, it again
reaches the check code
- The check returns false because of btrfs_can_activate_zone()'s "if
(raid_index != BTRFS_RAID_SINGLE)" part
- That results in returning -ENOSPC without allocating a new chunk
As a result, we end up hitting -ENOSPC too early.
Move the check to the right place in the can_allocate_chunk() hook,
and do the active zone check depending on the allocation flag, not on
the raid index.
CC: stable@vger.kernel.org # 5.16
Signed-off-by: Naohiro Aota <naohiro.aota@wdc.com>
Signed-off-by: David Sterba <dsterba@suse.com>
2021-12-07 23:35:49 +08:00
|
|
|
bool btrfs_can_activate_zone(struct btrfs_fs_devices *fs_devices, u64 flags)
|
2021-08-19 20:19:22 +08:00
|
|
|
{
|
btrfs: zoned: traverse devices under chunk_mutex in btrfs_can_activate_zone
btrfs_can_activate_zone() can be called with the device_list_mutex already
held, which will lead to a deadlock:
insert_dev_extents() // Takes device_list_mutex
`-> insert_dev_extent()
`-> btrfs_insert_empty_item()
`-> btrfs_insert_empty_items()
`-> btrfs_search_slot()
`-> btrfs_cow_block()
`-> __btrfs_cow_block()
`-> btrfs_alloc_tree_block()
`-> btrfs_reserve_extent()
`-> find_free_extent()
`-> find_free_extent_update_loop()
`-> can_allocate_chunk()
`-> btrfs_can_activate_zone() // Takes device_list_mutex again
Instead of using the RCU on fs_devices->device_list we
can use fs_devices->alloc_list, protected by the chunk_mutex to traverse
the list of active devices.
We are in the chunk allocation thread. The newer chunk allocation
happens from the devices in the fs_device->alloc_list protected by the
chunk_mutex.
btrfs_create_chunk()
lockdep_assert_held(&info->chunk_mutex);
gather_device_info
list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list)
Also, a device that reappears after the mount won't join the alloc_list
yet and, it will be in the dev_list, which we don't want to consider in
the context of the chunk alloc.
[15.166572] WARNING: possible recursive locking detected
[15.167117] 5.17.0-rc6-dennis #79 Not tainted
[15.167487] --------------------------------------------
[15.167733] kworker/u8:3/146 is trying to acquire lock:
[15.167733] ffff888102962ee0 (&fs_devs->device_list_mutex){+.+.}-{3:3}, at: find_free_extent+0x15a/0x14f0 [btrfs]
[15.167733]
[15.167733] but task is already holding lock:
[15.167733] ffff888102962ee0 (&fs_devs->device_list_mutex){+.+.}-{3:3}, at: btrfs_create_pending_block_groups+0x20a/0x560 [btrfs]
[15.167733]
[15.167733] other info that might help us debug this:
[15.167733] Possible unsafe locking scenario:
[15.167733]
[15.171834] CPU0
[15.171834] ----
[15.171834] lock(&fs_devs->device_list_mutex);
[15.171834] lock(&fs_devs->device_list_mutex);
[15.171834]
[15.171834] *** DEADLOCK ***
[15.171834]
[15.171834] May be due to missing lock nesting notation
[15.171834]
[15.171834] 5 locks held by kworker/u8:3/146:
[15.171834] #0: ffff888100050938 ((wq_completion)events_unbound){+.+.}-{0:0}, at: process_one_work+0x1c3/0x5a0
[15.171834] #1: ffffc9000067be80 ((work_completion)(&fs_info->async_data_reclaim_work)){+.+.}-{0:0}, at: process_one_work+0x1c3/0x5a0
[15.176244] #2: ffff88810521e620 (sb_internal){.+.+}-{0:0}, at: flush_space+0x335/0x600 [btrfs]
[15.176244] #3: ffff888102962ee0 (&fs_devs->device_list_mutex){+.+.}-{3:3}, at: btrfs_create_pending_block_groups+0x20a/0x560 [btrfs]
[15.176244] #4: ffff8881152e4b78 (btrfs-dev-00){++++}-{3:3}, at: __btrfs_tree_lock+0x27/0x130 [btrfs]
[15.179641]
[15.179641] stack backtrace:
[15.179641] CPU: 1 PID: 146 Comm: kworker/u8:3 Not tainted 5.17.0-rc6-dennis #79
[15.179641] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.15.0-1.fc35 04/01/2014
[15.179641] Workqueue: events_unbound btrfs_async_reclaim_data_space [btrfs]
[15.179641] Call Trace:
[15.179641] <TASK>
[15.179641] dump_stack_lvl+0x45/0x59
[15.179641] __lock_acquire.cold+0x217/0x2b2
[15.179641] lock_acquire+0xbf/0x2b0
[15.183838] ? find_free_extent+0x15a/0x14f0 [btrfs]
[15.183838] __mutex_lock+0x8e/0x970
[15.183838] ? find_free_extent+0x15a/0x14f0 [btrfs]
[15.183838] ? find_free_extent+0x15a/0x14f0 [btrfs]
[15.183838] ? lock_is_held_type+0xd7/0x130
[15.183838] ? find_free_extent+0x15a/0x14f0 [btrfs]
[15.183838] find_free_extent+0x15a/0x14f0 [btrfs]
[15.183838] ? _raw_spin_unlock+0x24/0x40
[15.183838] ? btrfs_get_alloc_profile+0x106/0x230 [btrfs]
[15.187601] btrfs_reserve_extent+0x131/0x260 [btrfs]
[15.187601] btrfs_alloc_tree_block+0xb5/0x3b0 [btrfs]
[15.187601] __btrfs_cow_block+0x138/0x600 [btrfs]
[15.187601] btrfs_cow_block+0x10f/0x230 [btrfs]
[15.187601] btrfs_search_slot+0x55f/0xbc0 [btrfs]
[15.187601] ? lock_is_held_type+0xd7/0x130
[15.187601] btrfs_insert_empty_items+0x2d/0x60 [btrfs]
[15.187601] btrfs_create_pending_block_groups+0x2b3/0x560 [btrfs]
[15.187601] __btrfs_end_transaction+0x36/0x2a0 [btrfs]
[15.192037] flush_space+0x374/0x600 [btrfs]
[15.192037] ? find_held_lock+0x2b/0x80
[15.192037] ? btrfs_async_reclaim_data_space+0x49/0x180 [btrfs]
[15.192037] ? lock_release+0x131/0x2b0
[15.192037] btrfs_async_reclaim_data_space+0x70/0x180 [btrfs]
[15.192037] process_one_work+0x24c/0x5a0
[15.192037] worker_thread+0x4a/0x3d0
Fixes: a85f05e59bc1 ("btrfs: zoned: avoid chunk allocation if active block group has enough space")
CC: stable@vger.kernel.org # 5.16+
Reviewed-by: Anand Jain <anand.jain@oracle.com>
Signed-off-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
2022-03-07 18:47:17 +08:00
|
|
|
struct btrfs_fs_info *fs_info = fs_devices->fs_info;
|
2021-08-19 20:19:22 +08:00
|
|
|
struct btrfs_device *device;
|
|
|
|
bool ret = false;
|
|
|
|
|
btrfs: zoned: traverse devices under chunk_mutex in btrfs_can_activate_zone
btrfs_can_activate_zone() can be called with the device_list_mutex already
held, which will lead to a deadlock:
insert_dev_extents() // Takes device_list_mutex
`-> insert_dev_extent()
`-> btrfs_insert_empty_item()
`-> btrfs_insert_empty_items()
`-> btrfs_search_slot()
`-> btrfs_cow_block()
`-> __btrfs_cow_block()
`-> btrfs_alloc_tree_block()
`-> btrfs_reserve_extent()
`-> find_free_extent()
`-> find_free_extent_update_loop()
`-> can_allocate_chunk()
`-> btrfs_can_activate_zone() // Takes device_list_mutex again
Instead of using the RCU on fs_devices->device_list we
can use fs_devices->alloc_list, protected by the chunk_mutex to traverse
the list of active devices.
We are in the chunk allocation thread. The newer chunk allocation
happens from the devices in the fs_device->alloc_list protected by the
chunk_mutex.
btrfs_create_chunk()
lockdep_assert_held(&info->chunk_mutex);
gather_device_info
list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list)
Also, a device that reappears after the mount won't join the alloc_list
yet and, it will be in the dev_list, which we don't want to consider in
the context of the chunk alloc.
[15.166572] WARNING: possible recursive locking detected
[15.167117] 5.17.0-rc6-dennis #79 Not tainted
[15.167487] --------------------------------------------
[15.167733] kworker/u8:3/146 is trying to acquire lock:
[15.167733] ffff888102962ee0 (&fs_devs->device_list_mutex){+.+.}-{3:3}, at: find_free_extent+0x15a/0x14f0 [btrfs]
[15.167733]
[15.167733] but task is already holding lock:
[15.167733] ffff888102962ee0 (&fs_devs->device_list_mutex){+.+.}-{3:3}, at: btrfs_create_pending_block_groups+0x20a/0x560 [btrfs]
[15.167733]
[15.167733] other info that might help us debug this:
[15.167733] Possible unsafe locking scenario:
[15.167733]
[15.171834] CPU0
[15.171834] ----
[15.171834] lock(&fs_devs->device_list_mutex);
[15.171834] lock(&fs_devs->device_list_mutex);
[15.171834]
[15.171834] *** DEADLOCK ***
[15.171834]
[15.171834] May be due to missing lock nesting notation
[15.171834]
[15.171834] 5 locks held by kworker/u8:3/146:
[15.171834] #0: ffff888100050938 ((wq_completion)events_unbound){+.+.}-{0:0}, at: process_one_work+0x1c3/0x5a0
[15.171834] #1: ffffc9000067be80 ((work_completion)(&fs_info->async_data_reclaim_work)){+.+.}-{0:0}, at: process_one_work+0x1c3/0x5a0
[15.176244] #2: ffff88810521e620 (sb_internal){.+.+}-{0:0}, at: flush_space+0x335/0x600 [btrfs]
[15.176244] #3: ffff888102962ee0 (&fs_devs->device_list_mutex){+.+.}-{3:3}, at: btrfs_create_pending_block_groups+0x20a/0x560 [btrfs]
[15.176244] #4: ffff8881152e4b78 (btrfs-dev-00){++++}-{3:3}, at: __btrfs_tree_lock+0x27/0x130 [btrfs]
[15.179641]
[15.179641] stack backtrace:
[15.179641] CPU: 1 PID: 146 Comm: kworker/u8:3 Not tainted 5.17.0-rc6-dennis #79
[15.179641] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.15.0-1.fc35 04/01/2014
[15.179641] Workqueue: events_unbound btrfs_async_reclaim_data_space [btrfs]
[15.179641] Call Trace:
[15.179641] <TASK>
[15.179641] dump_stack_lvl+0x45/0x59
[15.179641] __lock_acquire.cold+0x217/0x2b2
[15.179641] lock_acquire+0xbf/0x2b0
[15.183838] ? find_free_extent+0x15a/0x14f0 [btrfs]
[15.183838] __mutex_lock+0x8e/0x970
[15.183838] ? find_free_extent+0x15a/0x14f0 [btrfs]
[15.183838] ? find_free_extent+0x15a/0x14f0 [btrfs]
[15.183838] ? lock_is_held_type+0xd7/0x130
[15.183838] ? find_free_extent+0x15a/0x14f0 [btrfs]
[15.183838] find_free_extent+0x15a/0x14f0 [btrfs]
[15.183838] ? _raw_spin_unlock+0x24/0x40
[15.183838] ? btrfs_get_alloc_profile+0x106/0x230 [btrfs]
[15.187601] btrfs_reserve_extent+0x131/0x260 [btrfs]
[15.187601] btrfs_alloc_tree_block+0xb5/0x3b0 [btrfs]
[15.187601] __btrfs_cow_block+0x138/0x600 [btrfs]
[15.187601] btrfs_cow_block+0x10f/0x230 [btrfs]
[15.187601] btrfs_search_slot+0x55f/0xbc0 [btrfs]
[15.187601] ? lock_is_held_type+0xd7/0x130
[15.187601] btrfs_insert_empty_items+0x2d/0x60 [btrfs]
[15.187601] btrfs_create_pending_block_groups+0x2b3/0x560 [btrfs]
[15.187601] __btrfs_end_transaction+0x36/0x2a0 [btrfs]
[15.192037] flush_space+0x374/0x600 [btrfs]
[15.192037] ? find_held_lock+0x2b/0x80
[15.192037] ? btrfs_async_reclaim_data_space+0x49/0x180 [btrfs]
[15.192037] ? lock_release+0x131/0x2b0
[15.192037] btrfs_async_reclaim_data_space+0x70/0x180 [btrfs]
[15.192037] process_one_work+0x24c/0x5a0
[15.192037] worker_thread+0x4a/0x3d0
Fixes: a85f05e59bc1 ("btrfs: zoned: avoid chunk allocation if active block group has enough space")
CC: stable@vger.kernel.org # 5.16+
Reviewed-by: Anand Jain <anand.jain@oracle.com>
Signed-off-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
2022-03-07 18:47:17 +08:00
|
|
|
if (!btrfs_is_zoned(fs_info))
|
2021-08-19 20:19:22 +08:00
|
|
|
return true;
|
|
|
|
|
|
|
|
/* Check if there is a device with active zones left */
|
btrfs: zoned: traverse devices under chunk_mutex in btrfs_can_activate_zone
btrfs_can_activate_zone() can be called with the device_list_mutex already
held, which will lead to a deadlock:
insert_dev_extents() // Takes device_list_mutex
`-> insert_dev_extent()
`-> btrfs_insert_empty_item()
`-> btrfs_insert_empty_items()
`-> btrfs_search_slot()
`-> btrfs_cow_block()
`-> __btrfs_cow_block()
`-> btrfs_alloc_tree_block()
`-> btrfs_reserve_extent()
`-> find_free_extent()
`-> find_free_extent_update_loop()
`-> can_allocate_chunk()
`-> btrfs_can_activate_zone() // Takes device_list_mutex again
Instead of using the RCU on fs_devices->device_list we
can use fs_devices->alloc_list, protected by the chunk_mutex to traverse
the list of active devices.
We are in the chunk allocation thread. The newer chunk allocation
happens from the devices in the fs_device->alloc_list protected by the
chunk_mutex.
btrfs_create_chunk()
lockdep_assert_held(&info->chunk_mutex);
gather_device_info
list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list)
Also, a device that reappears after the mount won't join the alloc_list
yet and, it will be in the dev_list, which we don't want to consider in
the context of the chunk alloc.
[15.166572] WARNING: possible recursive locking detected
[15.167117] 5.17.0-rc6-dennis #79 Not tainted
[15.167487] --------------------------------------------
[15.167733] kworker/u8:3/146 is trying to acquire lock:
[15.167733] ffff888102962ee0 (&fs_devs->device_list_mutex){+.+.}-{3:3}, at: find_free_extent+0x15a/0x14f0 [btrfs]
[15.167733]
[15.167733] but task is already holding lock:
[15.167733] ffff888102962ee0 (&fs_devs->device_list_mutex){+.+.}-{3:3}, at: btrfs_create_pending_block_groups+0x20a/0x560 [btrfs]
[15.167733]
[15.167733] other info that might help us debug this:
[15.167733] Possible unsafe locking scenario:
[15.167733]
[15.171834] CPU0
[15.171834] ----
[15.171834] lock(&fs_devs->device_list_mutex);
[15.171834] lock(&fs_devs->device_list_mutex);
[15.171834]
[15.171834] *** DEADLOCK ***
[15.171834]
[15.171834] May be due to missing lock nesting notation
[15.171834]
[15.171834] 5 locks held by kworker/u8:3/146:
[15.171834] #0: ffff888100050938 ((wq_completion)events_unbound){+.+.}-{0:0}, at: process_one_work+0x1c3/0x5a0
[15.171834] #1: ffffc9000067be80 ((work_completion)(&fs_info->async_data_reclaim_work)){+.+.}-{0:0}, at: process_one_work+0x1c3/0x5a0
[15.176244] #2: ffff88810521e620 (sb_internal){.+.+}-{0:0}, at: flush_space+0x335/0x600 [btrfs]
[15.176244] #3: ffff888102962ee0 (&fs_devs->device_list_mutex){+.+.}-{3:3}, at: btrfs_create_pending_block_groups+0x20a/0x560 [btrfs]
[15.176244] #4: ffff8881152e4b78 (btrfs-dev-00){++++}-{3:3}, at: __btrfs_tree_lock+0x27/0x130 [btrfs]
[15.179641]
[15.179641] stack backtrace:
[15.179641] CPU: 1 PID: 146 Comm: kworker/u8:3 Not tainted 5.17.0-rc6-dennis #79
[15.179641] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.15.0-1.fc35 04/01/2014
[15.179641] Workqueue: events_unbound btrfs_async_reclaim_data_space [btrfs]
[15.179641] Call Trace:
[15.179641] <TASK>
[15.179641] dump_stack_lvl+0x45/0x59
[15.179641] __lock_acquire.cold+0x217/0x2b2
[15.179641] lock_acquire+0xbf/0x2b0
[15.183838] ? find_free_extent+0x15a/0x14f0 [btrfs]
[15.183838] __mutex_lock+0x8e/0x970
[15.183838] ? find_free_extent+0x15a/0x14f0 [btrfs]
[15.183838] ? find_free_extent+0x15a/0x14f0 [btrfs]
[15.183838] ? lock_is_held_type+0xd7/0x130
[15.183838] ? find_free_extent+0x15a/0x14f0 [btrfs]
[15.183838] find_free_extent+0x15a/0x14f0 [btrfs]
[15.183838] ? _raw_spin_unlock+0x24/0x40
[15.183838] ? btrfs_get_alloc_profile+0x106/0x230 [btrfs]
[15.187601] btrfs_reserve_extent+0x131/0x260 [btrfs]
[15.187601] btrfs_alloc_tree_block+0xb5/0x3b0 [btrfs]
[15.187601] __btrfs_cow_block+0x138/0x600 [btrfs]
[15.187601] btrfs_cow_block+0x10f/0x230 [btrfs]
[15.187601] btrfs_search_slot+0x55f/0xbc0 [btrfs]
[15.187601] ? lock_is_held_type+0xd7/0x130
[15.187601] btrfs_insert_empty_items+0x2d/0x60 [btrfs]
[15.187601] btrfs_create_pending_block_groups+0x2b3/0x560 [btrfs]
[15.187601] __btrfs_end_transaction+0x36/0x2a0 [btrfs]
[15.192037] flush_space+0x374/0x600 [btrfs]
[15.192037] ? find_held_lock+0x2b/0x80
[15.192037] ? btrfs_async_reclaim_data_space+0x49/0x180 [btrfs]
[15.192037] ? lock_release+0x131/0x2b0
[15.192037] btrfs_async_reclaim_data_space+0x70/0x180 [btrfs]
[15.192037] process_one_work+0x24c/0x5a0
[15.192037] worker_thread+0x4a/0x3d0
Fixes: a85f05e59bc1 ("btrfs: zoned: avoid chunk allocation if active block group has enough space")
CC: stable@vger.kernel.org # 5.16+
Reviewed-by: Anand Jain <anand.jain@oracle.com>
Signed-off-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
2022-03-07 18:47:17 +08:00
|
|
|
mutex_lock(&fs_info->chunk_mutex);
|
|
|
|
list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
|
2021-08-19 20:19:22 +08:00
|
|
|
struct btrfs_zoned_device_info *zinfo = device->zone_info;
|
|
|
|
|
|
|
|
if (!device->bdev)
|
|
|
|
continue;
|
|
|
|
|
2023-03-13 15:29:49 +08:00
|
|
|
if (!zinfo->max_active_zones) {
|
2021-08-19 20:19:22 +08:00
|
|
|
ret = true;
|
|
|
|
break;
|
|
|
|
}
|
2023-03-13 15:29:49 +08:00
|
|
|
|
|
|
|
switch (flags & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
|
|
|
|
case 0: /* single */
|
|
|
|
ret = (atomic_read(&zinfo->active_zones_left) >= 1);
|
|
|
|
break;
|
|
|
|
case BTRFS_BLOCK_GROUP_DUP:
|
|
|
|
ret = (atomic_read(&zinfo->active_zones_left) >= 2);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
if (ret)
|
|
|
|
break;
|
2021-08-19 20:19:22 +08:00
|
|
|
}
|
btrfs: zoned: traverse devices under chunk_mutex in btrfs_can_activate_zone
btrfs_can_activate_zone() can be called with the device_list_mutex already
held, which will lead to a deadlock:
insert_dev_extents() // Takes device_list_mutex
`-> insert_dev_extent()
`-> btrfs_insert_empty_item()
`-> btrfs_insert_empty_items()
`-> btrfs_search_slot()
`-> btrfs_cow_block()
`-> __btrfs_cow_block()
`-> btrfs_alloc_tree_block()
`-> btrfs_reserve_extent()
`-> find_free_extent()
`-> find_free_extent_update_loop()
`-> can_allocate_chunk()
`-> btrfs_can_activate_zone() // Takes device_list_mutex again
Instead of using the RCU on fs_devices->device_list we
can use fs_devices->alloc_list, protected by the chunk_mutex to traverse
the list of active devices.
We are in the chunk allocation thread. The newer chunk allocation
happens from the devices in the fs_device->alloc_list protected by the
chunk_mutex.
btrfs_create_chunk()
lockdep_assert_held(&info->chunk_mutex);
gather_device_info
list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list)
Also, a device that reappears after the mount won't join the alloc_list
yet and, it will be in the dev_list, which we don't want to consider in
the context of the chunk alloc.
[15.166572] WARNING: possible recursive locking detected
[15.167117] 5.17.0-rc6-dennis #79 Not tainted
[15.167487] --------------------------------------------
[15.167733] kworker/u8:3/146 is trying to acquire lock:
[15.167733] ffff888102962ee0 (&fs_devs->device_list_mutex){+.+.}-{3:3}, at: find_free_extent+0x15a/0x14f0 [btrfs]
[15.167733]
[15.167733] but task is already holding lock:
[15.167733] ffff888102962ee0 (&fs_devs->device_list_mutex){+.+.}-{3:3}, at: btrfs_create_pending_block_groups+0x20a/0x560 [btrfs]
[15.167733]
[15.167733] other info that might help us debug this:
[15.167733] Possible unsafe locking scenario:
[15.167733]
[15.171834] CPU0
[15.171834] ----
[15.171834] lock(&fs_devs->device_list_mutex);
[15.171834] lock(&fs_devs->device_list_mutex);
[15.171834]
[15.171834] *** DEADLOCK ***
[15.171834]
[15.171834] May be due to missing lock nesting notation
[15.171834]
[15.171834] 5 locks held by kworker/u8:3/146:
[15.171834] #0: ffff888100050938 ((wq_completion)events_unbound){+.+.}-{0:0}, at: process_one_work+0x1c3/0x5a0
[15.171834] #1: ffffc9000067be80 ((work_completion)(&fs_info->async_data_reclaim_work)){+.+.}-{0:0}, at: process_one_work+0x1c3/0x5a0
[15.176244] #2: ffff88810521e620 (sb_internal){.+.+}-{0:0}, at: flush_space+0x335/0x600 [btrfs]
[15.176244] #3: ffff888102962ee0 (&fs_devs->device_list_mutex){+.+.}-{3:3}, at: btrfs_create_pending_block_groups+0x20a/0x560 [btrfs]
[15.176244] #4: ffff8881152e4b78 (btrfs-dev-00){++++}-{3:3}, at: __btrfs_tree_lock+0x27/0x130 [btrfs]
[15.179641]
[15.179641] stack backtrace:
[15.179641] CPU: 1 PID: 146 Comm: kworker/u8:3 Not tainted 5.17.0-rc6-dennis #79
[15.179641] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.15.0-1.fc35 04/01/2014
[15.179641] Workqueue: events_unbound btrfs_async_reclaim_data_space [btrfs]
[15.179641] Call Trace:
[15.179641] <TASK>
[15.179641] dump_stack_lvl+0x45/0x59
[15.179641] __lock_acquire.cold+0x217/0x2b2
[15.179641] lock_acquire+0xbf/0x2b0
[15.183838] ? find_free_extent+0x15a/0x14f0 [btrfs]
[15.183838] __mutex_lock+0x8e/0x970
[15.183838] ? find_free_extent+0x15a/0x14f0 [btrfs]
[15.183838] ? find_free_extent+0x15a/0x14f0 [btrfs]
[15.183838] ? lock_is_held_type+0xd7/0x130
[15.183838] ? find_free_extent+0x15a/0x14f0 [btrfs]
[15.183838] find_free_extent+0x15a/0x14f0 [btrfs]
[15.183838] ? _raw_spin_unlock+0x24/0x40
[15.183838] ? btrfs_get_alloc_profile+0x106/0x230 [btrfs]
[15.187601] btrfs_reserve_extent+0x131/0x260 [btrfs]
[15.187601] btrfs_alloc_tree_block+0xb5/0x3b0 [btrfs]
[15.187601] __btrfs_cow_block+0x138/0x600 [btrfs]
[15.187601] btrfs_cow_block+0x10f/0x230 [btrfs]
[15.187601] btrfs_search_slot+0x55f/0xbc0 [btrfs]
[15.187601] ? lock_is_held_type+0xd7/0x130
[15.187601] btrfs_insert_empty_items+0x2d/0x60 [btrfs]
[15.187601] btrfs_create_pending_block_groups+0x2b3/0x560 [btrfs]
[15.187601] __btrfs_end_transaction+0x36/0x2a0 [btrfs]
[15.192037] flush_space+0x374/0x600 [btrfs]
[15.192037] ? find_held_lock+0x2b/0x80
[15.192037] ? btrfs_async_reclaim_data_space+0x49/0x180 [btrfs]
[15.192037] ? lock_release+0x131/0x2b0
[15.192037] btrfs_async_reclaim_data_space+0x70/0x180 [btrfs]
[15.192037] process_one_work+0x24c/0x5a0
[15.192037] worker_thread+0x4a/0x3d0
Fixes: a85f05e59bc1 ("btrfs: zoned: avoid chunk allocation if active block group has enough space")
CC: stable@vger.kernel.org # 5.16+
Reviewed-by: Anand Jain <anand.jain@oracle.com>
Signed-off-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
2022-03-07 18:47:17 +08:00
|
|
|
mutex_unlock(&fs_info->chunk_mutex);
|
2021-08-19 20:19:22 +08:00
|
|
|
|
2022-07-09 07:18:50 +08:00
|
|
|
if (!ret)
|
|
|
|
set_bit(BTRFS_FS_NEED_ZONE_FINISH, &fs_info->flags);
|
|
|
|
|
2021-08-19 20:19:22 +08:00
|
|
|
return ret;
|
|
|
|
}
|
2021-08-19 20:19:23 +08:00
|
|
|
|
|
|
|
/*
 * Finish the zone backing the block group at @logical if the write ending at
 * @logical + @length leaves no room for even the smallest allocation.
 */
void btrfs_zone_finish_endio(struct btrfs_fs_info *fs_info, u64 logical, u64 length)
{
	struct btrfs_block_group *bg;
	u64 smallest_alloc;

	if (!btrfs_is_zoned(fs_info))
		return;

	bg = btrfs_lookup_block_group(fs_info, logical);
	ASSERT(bg);

	/*
	 * Zoned btrfs never uses MIXED_BG, so a block group is either pure
	 * DATA (sector granularity) or metadata (node granularity).
	 */
	smallest_alloc = (bg->flags & BTRFS_BLOCK_GROUP_DATA) ?
		fs_info->sectorsize : fs_info->nodesize;

	/*
	 * Only finish the zone when not even the smallest possible unit could
	 * still be allocated from the remaining capacity.
	 */
	if (logical + length + smallest_alloc >
	    bg->start + bg->zone_capacity)
		do_zone_finish(bg, true);

	btrfs_put_block_group(bg);
}
|
|
|
|
|
2022-05-04 08:48:53 +08:00
|
|
|
static void btrfs_zone_finish_endio_workfn(struct work_struct *work)
|
|
|
|
{
|
|
|
|
struct btrfs_block_group *bg =
|
|
|
|
container_of(work, struct btrfs_block_group, zone_finish_work);
|
2021-08-19 20:19:23 +08:00
|
|
|
|
2022-05-04 08:48:53 +08:00
|
|
|
wait_on_extent_buffer_writeback(bg->last_eb);
|
|
|
|
free_extent_buffer(bg->last_eb);
|
|
|
|
btrfs_zone_finish_endio(bg->fs_info, bg->start, bg->length);
|
|
|
|
btrfs_put_block_group(bg);
|
|
|
|
}
|
2021-08-19 20:19:23 +08:00
|
|
|
|
2022-05-04 08:48:53 +08:00
|
|
|
void btrfs_schedule_zone_finish_bg(struct btrfs_block_group *bg,
|
|
|
|
struct extent_buffer *eb)
|
|
|
|
{
|
2022-11-01 03:33:46 +08:00
|
|
|
if (!test_bit(BLOCK_GROUP_FLAG_SEQUENTIAL_ZONE, &bg->runtime_flags) ||
|
|
|
|
eb->start + eb->len * 2 <= bg->start + bg->zone_capacity)
|
2022-05-04 08:48:53 +08:00
|
|
|
return;
|
2021-08-19 20:19:23 +08:00
|
|
|
|
2022-05-04 08:48:53 +08:00
|
|
|
if (WARN_ON(bg->zone_finish_work.func == btrfs_zone_finish_endio_workfn)) {
|
|
|
|
btrfs_err(bg->fs_info, "double scheduling of bg %llu zone finishing",
|
|
|
|
bg->start);
|
|
|
|
return;
|
|
|
|
}
|
2021-08-19 20:19:23 +08:00
|
|
|
|
2022-05-04 08:48:53 +08:00
|
|
|
/* For the work */
|
|
|
|
btrfs_get_block_group(bg);
|
|
|
|
atomic_inc(&eb->refs);
|
|
|
|
bg->last_eb = eb;
|
|
|
|
INIT_WORK(&bg->zone_finish_work, btrfs_zone_finish_endio_workfn);
|
|
|
|
queue_work(system_unbound_wq, &bg->zone_finish_work);
|
2021-08-19 20:19:23 +08:00
|
|
|
}
|
2021-09-09 00:19:26 +08:00
|
|
|
|
|
|
|
void btrfs_clear_data_reloc_bg(struct btrfs_block_group *bg)
|
|
|
|
{
|
|
|
|
struct btrfs_fs_info *fs_info = bg->fs_info;
|
|
|
|
|
|
|
|
spin_lock(&fs_info->relocation_bg_lock);
|
|
|
|
if (fs_info->data_reloc_bg == bg->start)
|
|
|
|
fs_info->data_reloc_bg = 0;
|
|
|
|
spin_unlock(&fs_info->relocation_bg_lock);
|
|
|
|
}
|
2021-11-11 13:14:38 +08:00
|
|
|
|
|
|
|
void btrfs_free_zone_cache(struct btrfs_fs_info *fs_info)
|
|
|
|
{
|
|
|
|
struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
|
|
|
|
struct btrfs_device *device;
|
|
|
|
|
|
|
|
if (!btrfs_is_zoned(fs_info))
|
|
|
|
return;
|
|
|
|
|
|
|
|
mutex_lock(&fs_devices->device_list_mutex);
|
|
|
|
list_for_each_entry(device, &fs_devices->devices, dev_list) {
|
|
|
|
if (device->zone_info) {
|
|
|
|
vfree(device->zone_info->zone_cache);
|
|
|
|
device->zone_info->zone_cache = NULL;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
mutex_unlock(&fs_devices->device_list_mutex);
|
|
|
|
}
|
2022-03-29 16:56:09 +08:00
|
|
|
|
|
|
|
bool btrfs_zoned_should_reclaim(struct btrfs_fs_info *fs_info)
|
|
|
|
{
|
|
|
|
struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
|
|
|
|
struct btrfs_device *device;
|
|
|
|
u64 used = 0;
|
|
|
|
u64 total = 0;
|
|
|
|
u64 factor;
|
|
|
|
|
|
|
|
ASSERT(btrfs_is_zoned(fs_info));
|
|
|
|
|
|
|
|
if (fs_info->bg_reclaim_threshold == 0)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
mutex_lock(&fs_devices->device_list_mutex);
|
|
|
|
list_for_each_entry(device, &fs_devices->devices, dev_list) {
|
|
|
|
if (!device->bdev)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
total += device->disk_total_bytes;
|
|
|
|
used += device->bytes_used;
|
|
|
|
}
|
|
|
|
mutex_unlock(&fs_devices->device_list_mutex);
|
|
|
|
|
|
|
|
factor = div64_u64(used * 100, total);
|
|
|
|
return factor >= fs_info->bg_reclaim_threshold;
|
|
|
|
}
|
btrfs: zoned: prevent allocation from previous data relocation BG
After commit 5f0addf7b890 ("btrfs: zoned: use dedicated lock for data
relocation"), we observe IO errors on e.g, btrfs/232 like below.
[09.0][T4038707] WARNING: CPU: 3 PID: 4038707 at fs/btrfs/extent-tree.c:2381 btrfs_cross_ref_exist+0xfc/0x120 [btrfs]
<snip>
[09.9][T4038707] Call Trace:
[09.5][T4038707] <TASK>
[09.3][T4038707] run_delalloc_nocow+0x7f1/0x11a0 [btrfs]
[09.6][T4038707] ? test_range_bit+0x174/0x320 [btrfs]
[09.2][T4038707] ? fallback_to_cow+0x980/0x980 [btrfs]
[09.3][T4038707] ? find_lock_delalloc_range+0x33e/0x3e0 [btrfs]
[09.5][T4038707] btrfs_run_delalloc_range+0x445/0x1320 [btrfs]
[09.2][T4038707] ? test_range_bit+0x320/0x320 [btrfs]
[09.4][T4038707] ? lock_downgrade+0x6a0/0x6a0
[09.2][T4038707] ? orc_find.part.0+0x1ed/0x300
[09.5][T4038707] ? __module_address.part.0+0x25/0x300
[09.0][T4038707] writepage_delalloc+0x159/0x310 [btrfs]
<snip>
[09.4][ C3] sd 10:0:1:0: [sde] tag#2620 FAILED Result: hostbyte=DID_OK driverbyte=DRIVER_OK cmd_age=0s
[09.5][ C3] sd 10:0:1:0: [sde] tag#2620 Sense Key : Illegal Request [current]
[09.9][ C3] sd 10:0:1:0: [sde] tag#2620 Add. Sense: Unaligned write command
[09.5][ C3] sd 10:0:1:0: [sde] tag#2620 CDB: Write(16) 8a 00 00 00 00 00 02 f3 63 87 00 00 00 2c 00 00
[09.4][ C3] critical target error, dev sde, sector 396041272 op 0x1:(WRITE) flags 0x800 phys_seg 3 prio class 0
[09.9][ C3] BTRFS error (device dm-1): bdev /dev/mapper/dml_102_2 errs: wr 1, rd 0, flush 0, corrupt 0, gen 0
The IO errors occur when we allocate a regular extent in previous data
relocation block group.
On zoned btrfs, we use a dedicated block group to relocate a data
extent. Thus, we allocate relocating data extents (pre-alloc) only from
the dedicated block group and vice versa. Once the free space in the
dedicated block group gets tight, a relocating extent may not fit into
the block group. In that case, we need to switch the dedicated block
group to the next one. Then, the previous one is now freed up for
allocating a regular extent. The BG is already not enough to allocate
the relocating extent, but there is still room to allocate a smaller
extent. Now the problem happens. By allocating a regular extent while
nocow IOs for the relocation is still on-going, we will issue WRITE IOs
(for relocation) and ZONE APPEND IOs (for the regular writes) at the
same time. Those mixed IOs confuse the write pointer and cause the
unaligned write errors.
This commit introduces a new bit 'zoned_data_reloc_ongoing' to the
btrfs_block_group. We set this bit before releasing the dedicated block
group, and no extent are allocated from a block group having this bit
set. This bit is similar to setting block_group->ro, but is different from
it by allowing nocow writes to start.
Once all the nocow IO for relocation is done (hooked from
btrfs_finish_ordered_io), we reset the bit to release the block group for
further allocation.
Fixes: c2707a255623 ("btrfs: zoned: add a dedicated data relocation block group")
CC: stable@vger.kernel.org # 5.16+
Signed-off-by: Naohiro Aota <naohiro.aota@wdc.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
2022-06-07 15:08:29 +08:00
|
|
|
|
|
|
|
/*
 * Release a previous data relocation block group for regular allocations.
 *
 * Called when a nocow (relocation) write covering [@logical, @logical +
 * @length) completed.  Once that write reaches the block group's current
 * allocation offset, all relocation extents have been written and it is safe
 * to clear BLOCK_GROUP_FLAG_ZONED_DATA_RELOC.  Clearing it earlier would let
 * regular ZONE APPEND IO mix with in-flight relocation WRITE IO, confusing
 * the zone write pointer.
 */
void btrfs_zoned_release_data_reloc_bg(struct btrfs_fs_info *fs_info, u64 logical,
				       u64 length)
{
	struct btrfs_block_group *block_group;

	if (!btrfs_is_zoned(fs_info))
		return;

	block_group = btrfs_lookup_block_group(fs_info, logical);
	/* It should be called on a previous data relocation block group. */
	ASSERT(block_group && (block_group->flags & BTRFS_BLOCK_GROUP_DATA));

	spin_lock(&block_group->lock);
	/* Nothing to do if the block group was already released. */
	if (!test_bit(BLOCK_GROUP_FLAG_ZONED_DATA_RELOC, &block_group->runtime_flags))
		goto out;

	/* All relocation extents are written. */
	if (block_group->start + block_group->alloc_offset == logical + length) {
		/* Now, release this block group for further allocations. */
		clear_bit(BLOCK_GROUP_FLAG_ZONED_DATA_RELOC,
			  &block_group->runtime_flags);
	}

out:
	spin_unlock(&block_group->lock);
	btrfs_put_block_group(block_group);
}
|
2022-07-09 07:18:44 +08:00
|
|
|
|
|
|
|
/*
 * Finish the zone of one active block group to free up an active zone
 * resource on the device.
 *
 * Picks the active, non-SYSTEM block group with the least remaining
 * capacity (zone_capacity - alloc_offset), skipping block groups with
 * outstanding reservations or nothing allocated yet.
 *
 * Return: 1 if a block group was finished, 0 if no candidate was found, or
 * a negative errno propagated from btrfs_zone_finish().
 */
int btrfs_zone_finish_one_bg(struct btrfs_fs_info *fs_info)
{
	struct btrfs_block_group *block_group;
	struct btrfs_block_group *min_bg = NULL;
	u64 min_avail = U64_MAX;
	int ret;

	spin_lock(&fs_info->zone_active_bgs_lock);
	list_for_each_entry(block_group, &fs_info->zone_active_bgs,
			    active_bg_list) {
		u64 avail;

		spin_lock(&block_group->lock);
		/*
		 * Skip block groups that have reserved bytes, that have
		 * nothing allocated yet, or that hold SYSTEM chunks.
		 */
		if (block_group->reserved || block_group->alloc_offset == 0 ||
		    (block_group->flags & BTRFS_BLOCK_GROUP_SYSTEM)) {
			spin_unlock(&block_group->lock);
			continue;
		}

		/* Remember the candidate with the least free capacity. */
		avail = block_group->zone_capacity - block_group->alloc_offset;
		if (min_avail > avail) {
			if (min_bg)
				btrfs_put_block_group(min_bg);
			min_bg = block_group;
			min_avail = avail;
			/* Pin the candidate before the locks are dropped. */
			btrfs_get_block_group(min_bg);
		}
		spin_unlock(&block_group->lock);
	}
	spin_unlock(&fs_info->zone_active_bgs_lock);

	if (!min_bg)
		return 0;

	ret = btrfs_zone_finish(min_bg);
	btrfs_put_block_group(min_bg);

	return ret < 0 ? ret : 1;
}
|
2022-07-09 07:18:47 +08:00
|
|
|
|
|
|
|
/*
 * Try to activate one metadata block group of @space_info.
 *
 * Scans all RAID-type lists for a block group that is neither full nor
 * already active and tries to activate it.  If none can be activated and
 * @do_finish is true, one active block group is zone-finished to free an
 * active zone resource and the scan is retried.
 *
 * DATA space infos are not handled here (returns 0 immediately).
 *
 * Return: 1 if a block group was activated, 0 if not (or nothing left to
 * finish), or a negative errno from btrfs_zone_finish_one_bg().
 */
int btrfs_zoned_activate_one_bg(struct btrfs_fs_info *fs_info,
				struct btrfs_space_info *space_info,
				bool do_finish)
{
	struct btrfs_block_group *bg;
	int index;

	if (!btrfs_is_zoned(fs_info) || (space_info->flags & BTRFS_BLOCK_GROUP_DATA))
		return 0;

	for (;;) {
		int ret;
		bool need_finish = false;

		down_read(&space_info->groups_sem);
		for (index = 0; index < BTRFS_NR_RAID_TYPES; index++) {
			list_for_each_entry(bg, &space_info->block_groups[index],
					    list) {
				/* Contended lock: just try the next one. */
				if (!spin_trylock(&bg->lock))
					continue;
				/* Full or already active: not a candidate. */
				if (btrfs_zoned_bg_is_full(bg) ||
				    test_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE,
					     &bg->runtime_flags)) {
					spin_unlock(&bg->lock);
					continue;
				}
				spin_unlock(&bg->lock);

				if (btrfs_zone_activate(bg)) {
					up_read(&space_info->groups_sem);
					return 1;
				}

				/*
				 * Activation failed, likely out of active
				 * zones: finishing one may help.
				 */
				need_finish = true;
			}
		}
		up_read(&space_info->groups_sem);

		if (!do_finish || !need_finish)
			break;

		ret = btrfs_zone_finish_one_bg(fs_info);
		if (ret == 0)
			break;
		if (ret < 0)
			return ret;
	}

	return 0;
}
|