btrfs: replace map_lookup->stripe_len by BTRFS_STRIPE_LEN

Currently btrfs doesn't support stripe lengths other than 64KiB.
This is already set in the tree-checker.

There is really no point in recording that fixed value in map_lookup for
now; all uses can be replaced with BTRFS_STRIPE_LEN.

Furthermore, we can use the fixed stripe length to do the following
optimizations:

- Use BTRFS_STRIPE_LEN_SHIFT to replace some 64-bit divisions.
  Now we only need to do a right shift.

  And the value of BTRFS_STRIPE_LEN itself is already too large to be a
  valid shift amount, thus if we accidentally use BTRFS_STRIPE_LEN as a
  shift count, a compiler warning would be triggered.

  Thus this bit shift optimization would be safe.

- Use BTRFS_STRIPE_LEN_MASK to calculate the offset inside a stripe

Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Qu Wenruo <wqu@suse.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
This commit is contained in:
Qu Wenruo 2023-02-17 13:36:58 +08:00 committed by David Sterba
parent dcb2137c84
commit a97699d1d6
5 changed files with 66 additions and 60 deletions

View File

@ -1977,12 +1977,12 @@ int btrfs_rmap_block(struct btrfs_fs_info *fs_info, u64 chunk_start,
map = em->map_lookup; map = em->map_lookup;
data_stripe_length = em->orig_block_len; data_stripe_length = em->orig_block_len;
io_stripe_size = map->stripe_len; io_stripe_size = BTRFS_STRIPE_LEN;
chunk_start = em->start; chunk_start = em->start;
/* For RAID5/6 adjust to a full IO stripe length */ /* For RAID5/6 adjust to a full IO stripe length */
if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
io_stripe_size = map->stripe_len * nr_data_stripes(map); io_stripe_size = nr_data_stripes(map) << BTRFS_STRIPE_LEN_SHIFT;
buf = kcalloc(map->num_stripes, sizeof(u64), GFP_NOFS); buf = kcalloc(map->num_stripes, sizeof(u64), GFP_NOFS);
if (!buf) { if (!buf) {
@ -2000,8 +2000,10 @@ int btrfs_rmap_block(struct btrfs_fs_info *fs_info, u64 chunk_start,
data_stripe_length)) data_stripe_length))
continue; continue;
stripe_nr = physical - map->stripes[i].physical; stripe_nr = (physical - map->stripes[i].physical) >>
stripe_nr = div64_u64_rem(stripe_nr, map->stripe_len, &offset); BTRFS_STRIPE_LEN_SHIFT;
offset = (physical - map->stripes[i].physical) &
BTRFS_STRIPE_LEN_MASK;
if (map->type & (BTRFS_BLOCK_GROUP_RAID0 | if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
BTRFS_BLOCK_GROUP_RAID10)) { BTRFS_BLOCK_GROUP_RAID10)) {

View File

@ -2722,7 +2722,7 @@ static int scrub_extent(struct scrub_ctx *sctx, struct map_lookup *map,
if (flags & BTRFS_EXTENT_FLAG_DATA) { if (flags & BTRFS_EXTENT_FLAG_DATA) {
if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
blocksize = map->stripe_len; blocksize = BTRFS_STRIPE_LEN;
else else
blocksize = sctx->fs_info->sectorsize; blocksize = sctx->fs_info->sectorsize;
spin_lock(&sctx->stat_lock); spin_lock(&sctx->stat_lock);
@ -2731,7 +2731,7 @@ static int scrub_extent(struct scrub_ctx *sctx, struct map_lookup *map,
spin_unlock(&sctx->stat_lock); spin_unlock(&sctx->stat_lock);
} else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) { } else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
blocksize = map->stripe_len; blocksize = BTRFS_STRIPE_LEN;
else else
blocksize = sctx->fs_info->nodesize; blocksize = sctx->fs_info->nodesize;
spin_lock(&sctx->stat_lock); spin_lock(&sctx->stat_lock);
@ -2920,9 +2920,9 @@ static int get_raid56_logic_offset(u64 physical, int num,
*offset = last_offset; *offset = last_offset;
for (i = 0; i < data_stripes; i++) { for (i = 0; i < data_stripes; i++) {
*offset = last_offset + i * map->stripe_len; *offset = last_offset + (i << BTRFS_STRIPE_LEN_SHIFT);
stripe_nr = div64_u64(*offset, map->stripe_len); stripe_nr = *offset >> BTRFS_STRIPE_LEN_SHIFT;
stripe_nr = div_u64(stripe_nr, data_stripes); stripe_nr = div_u64(stripe_nr, data_stripes);
/* Work out the disk rotation on this stripe-set */ /* Work out the disk rotation on this stripe-set */
@ -2935,7 +2935,7 @@ static int get_raid56_logic_offset(u64 physical, int num,
if (stripe_index < num) if (stripe_index < num)
j++; j++;
} }
*offset = last_offset + j * map->stripe_len; *offset = last_offset + (j << BTRFS_STRIPE_LEN_SHIFT);
return 1; return 1;
} }
@ -3205,7 +3205,7 @@ static int scrub_raid56_data_stripe_for_parity(struct scrub_ctx *sctx,
/* Path must not be populated */ /* Path must not be populated */
ASSERT(!path->nodes[0]); ASSERT(!path->nodes[0]);
while (cur_logical < logical + map->stripe_len) { while (cur_logical < logical + BTRFS_STRIPE_LEN) {
struct btrfs_io_context *bioc = NULL; struct btrfs_io_context *bioc = NULL;
struct btrfs_device *extent_dev; struct btrfs_device *extent_dev;
u64 extent_start; u64 extent_start;
@ -3217,7 +3217,7 @@ static int scrub_raid56_data_stripe_for_parity(struct scrub_ctx *sctx,
u64 extent_mirror_num; u64 extent_mirror_num;
ret = find_first_extent_item(extent_root, path, cur_logical, ret = find_first_extent_item(extent_root, path, cur_logical,
logical + map->stripe_len - cur_logical); logical + BTRFS_STRIPE_LEN - cur_logical);
/* No more extent item in this data stripe */ /* No more extent item in this data stripe */
if (ret > 0) { if (ret > 0) {
ret = 0; ret = 0;
@ -3231,7 +3231,7 @@ static int scrub_raid56_data_stripe_for_parity(struct scrub_ctx *sctx,
/* Metadata should not cross stripe boundaries */ /* Metadata should not cross stripe boundaries */
if ((extent_flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) && if ((extent_flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) &&
does_range_cross_boundary(extent_start, extent_size, does_range_cross_boundary(extent_start, extent_size,
logical, map->stripe_len)) { logical, BTRFS_STRIPE_LEN)) {
btrfs_err(fs_info, btrfs_err(fs_info,
"scrub: tree block %llu spanning stripes, ignored. logical=%llu", "scrub: tree block %llu spanning stripes, ignored. logical=%llu",
extent_start, logical); extent_start, logical);
@ -3247,7 +3247,7 @@ static int scrub_raid56_data_stripe_for_parity(struct scrub_ctx *sctx,
/* Truncate the range inside this data stripe */ /* Truncate the range inside this data stripe */
extent_size = min(extent_start + extent_size, extent_size = min(extent_start + extent_size,
logical + map->stripe_len) - cur_logical; logical + BTRFS_STRIPE_LEN) - cur_logical;
extent_start = cur_logical; extent_start = cur_logical;
ASSERT(extent_size <= U32_MAX); ASSERT(extent_size <= U32_MAX);
@ -3320,8 +3320,7 @@ static noinline_for_stack int scrub_raid56_parity(struct scrub_ctx *sctx,
path->search_commit_root = 1; path->search_commit_root = 1;
path->skip_locking = 1; path->skip_locking = 1;
ASSERT(map->stripe_len <= U32_MAX); nsectors = BTRFS_STRIPE_LEN >> fs_info->sectorsize_bits;
nsectors = map->stripe_len >> fs_info->sectorsize_bits;
ASSERT(nsectors <= BITS_PER_LONG); ASSERT(nsectors <= BITS_PER_LONG);
sparity = kzalloc(sizeof(struct scrub_parity), GFP_NOFS); sparity = kzalloc(sizeof(struct scrub_parity), GFP_NOFS);
if (!sparity) { if (!sparity) {
@ -3332,8 +3331,7 @@ static noinline_for_stack int scrub_raid56_parity(struct scrub_ctx *sctx,
return -ENOMEM; return -ENOMEM;
} }
ASSERT(map->stripe_len <= U32_MAX); sparity->stripe_len = BTRFS_STRIPE_LEN;
sparity->stripe_len = map->stripe_len;
sparity->nsectors = nsectors; sparity->nsectors = nsectors;
sparity->sctx = sctx; sparity->sctx = sctx;
sparity->scrub_dev = sdev; sparity->scrub_dev = sdev;
@ -3344,7 +3342,7 @@ static noinline_for_stack int scrub_raid56_parity(struct scrub_ctx *sctx,
ret = 0; ret = 0;
for (cur_logical = logic_start; cur_logical < logic_end; for (cur_logical = logic_start; cur_logical < logic_end;
cur_logical += map->stripe_len) { cur_logical += BTRFS_STRIPE_LEN) {
ret = scrub_raid56_data_stripe_for_parity(sctx, sparity, map, ret = scrub_raid56_data_stripe_for_parity(sctx, sparity, map,
sdev, path, cur_logical); sdev, path, cur_logical);
if (ret < 0) if (ret < 0)
@ -3536,7 +3534,7 @@ static u64 simple_stripe_full_stripe_len(const struct map_lookup *map)
ASSERT(map->type & (BTRFS_BLOCK_GROUP_RAID0 | ASSERT(map->type & (BTRFS_BLOCK_GROUP_RAID0 |
BTRFS_BLOCK_GROUP_RAID10)); BTRFS_BLOCK_GROUP_RAID10));
return map->num_stripes / map->sub_stripes * map->stripe_len; return (map->num_stripes / map->sub_stripes) << BTRFS_STRIPE_LEN_SHIFT;
} }
/* Get the logical bytenr for the stripe */ /* Get the logical bytenr for the stripe */
@ -3552,7 +3550,8 @@ static u64 simple_stripe_get_logical(struct map_lookup *map,
* (stripe_index / sub_stripes) gives how many data stripes we need to * (stripe_index / sub_stripes) gives how many data stripes we need to
* skip. * skip.
*/ */
return (stripe_index / map->sub_stripes) * map->stripe_len + bg->start; return ((stripe_index / map->sub_stripes) << BTRFS_STRIPE_LEN_SHIFT) +
bg->start;
} }
/* Get the mirror number for the stripe */ /* Get the mirror number for the stripe */
@ -3589,14 +3588,14 @@ static int scrub_simple_stripe(struct scrub_ctx *sctx,
* this stripe. * this stripe.
*/ */
ret = scrub_simple_mirror(sctx, extent_root, csum_root, bg, map, ret = scrub_simple_mirror(sctx, extent_root, csum_root, bg, map,
cur_logical, map->stripe_len, device, cur_logical, BTRFS_STRIPE_LEN, device,
cur_physical, mirror_num); cur_physical, mirror_num);
if (ret) if (ret)
return ret; return ret;
/* Skip to next stripe which belongs to the target device */ /* Skip to next stripe which belongs to the target device */
cur_logical += logical_increment; cur_logical += logical_increment;
/* For physical offset, we just go to next stripe */ /* For physical offset, we just go to next stripe */
cur_physical += map->stripe_len; cur_physical += BTRFS_STRIPE_LEN;
} }
return ret; return ret;
} }
@ -3690,7 +3689,7 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
if (profile & (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID10)) { if (profile & (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID10)) {
ret = scrub_simple_stripe(sctx, root, csum_root, bg, map, ret = scrub_simple_stripe(sctx, root, csum_root, bg, map,
scrub_dev, stripe_index); scrub_dev, stripe_index);
offset = map->stripe_len * (stripe_index / map->sub_stripes); offset = (stripe_index / map->sub_stripes) << BTRFS_STRIPE_LEN_SHIFT;
goto out; goto out;
} }
@ -3705,7 +3704,7 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
/* Initialize @offset in case we need to go to out: label */ /* Initialize @offset in case we need to go to out: label */
get_raid56_logic_offset(physical, stripe_index, map, &offset, NULL); get_raid56_logic_offset(physical, stripe_index, map, &offset, NULL);
increment = map->stripe_len * nr_data_stripes(map); increment = nr_data_stripes(map) << BTRFS_STRIPE_LEN_SHIFT;
/* /*
* Due to the rotation, for RAID56 it's better to iterate each stripe * Due to the rotation, for RAID56 it's better to iterate each stripe
@ -3736,13 +3735,13 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
* is still based on @mirror_num. * is still based on @mirror_num.
*/ */
ret = scrub_simple_mirror(sctx, root, csum_root, bg, map, ret = scrub_simple_mirror(sctx, root, csum_root, bg, map,
logical, map->stripe_len, logical, BTRFS_STRIPE_LEN,
scrub_dev, physical, 1); scrub_dev, physical, 1);
if (ret < 0) if (ret < 0)
goto out; goto out;
next: next:
logical += increment; logical += increment;
physical += map->stripe_len; physical += BTRFS_STRIPE_LEN;
spin_lock(&sctx->stat_lock); spin_lock(&sctx->stat_lock);
if (stop_loop) if (stop_loop)
sctx->stat.last_physical = sctx->stat.last_physical =

View File

@ -486,7 +486,6 @@ static int test_rmap_block(struct btrfs_fs_info *fs_info,
em->map_lookup = map; em->map_lookup = map;
map->num_stripes = test->num_stripes; map->num_stripes = test->num_stripes;
map->stripe_len = BTRFS_STRIPE_LEN;
map->type = test->raid_type; map->type = test->raid_type;
for (i = 0; i < map->num_stripes; i++) { for (i = 0; i < map->num_stripes; i++) {

View File

@ -5125,7 +5125,7 @@ static void init_alloc_chunk_ctl_policy_regular(
/* We don't want a chunk larger than 10% of writable space */ /* We don't want a chunk larger than 10% of writable space */
ctl->max_chunk_size = min(mult_perc(fs_devices->total_rw_bytes, 10), ctl->max_chunk_size = min(mult_perc(fs_devices->total_rw_bytes, 10),
ctl->max_chunk_size); ctl->max_chunk_size);
ctl->dev_extent_min = BTRFS_STRIPE_LEN * ctl->dev_stripes; ctl->dev_extent_min = ctl->dev_stripes << BTRFS_STRIPE_LEN_SHIFT;
} }
static void init_alloc_chunk_ctl_policy_zoned( static void init_alloc_chunk_ctl_policy_zoned(
@ -5407,7 +5407,6 @@ static struct btrfs_block_group *create_chunk(struct btrfs_trans_handle *trans,
j * ctl->stripe_size; j * ctl->stripe_size;
} }
} }
map->stripe_len = BTRFS_STRIPE_LEN;
map->io_align = BTRFS_STRIPE_LEN; map->io_align = BTRFS_STRIPE_LEN;
map->io_width = BTRFS_STRIPE_LEN; map->io_width = BTRFS_STRIPE_LEN;
map->type = type; map->type = type;
@ -5615,11 +5614,11 @@ int btrfs_chunk_alloc_add_chunk_item(struct btrfs_trans_handle *trans,
btrfs_set_stack_chunk_length(chunk, bg->length); btrfs_set_stack_chunk_length(chunk, bg->length);
btrfs_set_stack_chunk_owner(chunk, BTRFS_EXTENT_TREE_OBJECTID); btrfs_set_stack_chunk_owner(chunk, BTRFS_EXTENT_TREE_OBJECTID);
btrfs_set_stack_chunk_stripe_len(chunk, map->stripe_len); btrfs_set_stack_chunk_stripe_len(chunk, BTRFS_STRIPE_LEN);
btrfs_set_stack_chunk_type(chunk, map->type); btrfs_set_stack_chunk_type(chunk, map->type);
btrfs_set_stack_chunk_num_stripes(chunk, map->num_stripes); btrfs_set_stack_chunk_num_stripes(chunk, map->num_stripes);
btrfs_set_stack_chunk_io_align(chunk, map->stripe_len); btrfs_set_stack_chunk_io_align(chunk, BTRFS_STRIPE_LEN);
btrfs_set_stack_chunk_io_width(chunk, map->stripe_len); btrfs_set_stack_chunk_io_width(chunk, BTRFS_STRIPE_LEN);
btrfs_set_stack_chunk_sector_size(chunk, fs_info->sectorsize); btrfs_set_stack_chunk_sector_size(chunk, fs_info->sectorsize);
btrfs_set_stack_chunk_sub_stripes(chunk, map->sub_stripes); btrfs_set_stack_chunk_sub_stripes(chunk, map->sub_stripes);
@ -5809,7 +5808,7 @@ unsigned long btrfs_full_stripe_len(struct btrfs_fs_info *fs_info,
if (!WARN_ON(IS_ERR(em))) { if (!WARN_ON(IS_ERR(em))) {
map = em->map_lookup; map = em->map_lookup;
if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
len = map->stripe_len * nr_data_stripes(map); len = nr_data_stripes(map) << BTRFS_STRIPE_LEN_SHIFT;
free_extent_map(em); free_extent_map(em);
} }
return len; return len;
@ -5975,7 +5974,6 @@ struct btrfs_discard_stripe *btrfs_map_discard(struct btrfs_fs_info *fs_info,
u64 stripe_nr_end; u64 stripe_nr_end;
u64 stripe_end_offset; u64 stripe_end_offset;
u64 stripe_cnt; u64 stripe_cnt;
u64 stripe_len;
u64 stripe_offset; u64 stripe_offset;
u32 stripe_index; u32 stripe_index;
u32 factor = 0; u32 factor = 0;
@ -5996,26 +5994,25 @@ struct btrfs_discard_stripe *btrfs_map_discard(struct btrfs_fs_info *fs_info,
if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) { if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
ret = -EOPNOTSUPP; ret = -EOPNOTSUPP;
goto out_free_map; goto out_free_map;
} }
offset = logical - em->start; offset = logical - em->start;
length = min_t(u64, em->start + em->len - logical, length); length = min_t(u64, em->start + em->len - logical, length);
*length_ret = length; *length_ret = length;
stripe_len = map->stripe_len;
/* /*
* stripe_nr counts the total number of stripes we have to stride * stripe_nr counts the total number of stripes we have to stride
* to get to this block * to get to this block
*/ */
stripe_nr = div64_u64(offset, stripe_len); stripe_nr = offset >> BTRFS_STRIPE_LEN_SHIFT;
/* stripe_offset is the offset of this block in its stripe */ /* stripe_offset is the offset of this block in its stripe */
stripe_offset = offset - stripe_nr * stripe_len; stripe_offset = offset - (stripe_nr << BTRFS_STRIPE_LEN_SHIFT);
stripe_nr_end = round_up(offset + length, map->stripe_len); stripe_nr_end = round_up(offset + length, BTRFS_STRIPE_LEN) >>
stripe_nr_end = div64_u64(stripe_nr_end, map->stripe_len); BTRFS_STRIPE_LEN_SHIFT;
stripe_cnt = stripe_nr_end - stripe_nr; stripe_cnt = stripe_nr_end - stripe_nr;
stripe_end_offset = stripe_nr_end * map->stripe_len - stripe_end_offset = (stripe_nr_end << BTRFS_STRIPE_LEN_SHIFT) -
(offset + length); (offset + length);
/* /*
* after this, stripe_nr is the number of stripes on this * after this, stripe_nr is the number of stripes on this
@ -6057,15 +6054,15 @@ struct btrfs_discard_stripe *btrfs_map_discard(struct btrfs_fs_info *fs_info,
for (i = 0; i < *num_stripes; i++) { for (i = 0; i < *num_stripes; i++) {
stripes[i].physical = stripes[i].physical =
map->stripes[stripe_index].physical + map->stripes[stripe_index].physical +
stripe_offset + stripe_nr * map->stripe_len; stripe_offset + (stripe_nr << BTRFS_STRIPE_LEN_SHIFT);
stripes[i].dev = map->stripes[stripe_index].dev; stripes[i].dev = map->stripes[stripe_index].dev;
if (map->type & (BTRFS_BLOCK_GROUP_RAID0 | if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
BTRFS_BLOCK_GROUP_RAID10)) { BTRFS_BLOCK_GROUP_RAID10)) {
stripes[i].length = stripes_per_dev * map->stripe_len; stripes[i].length = stripes_per_dev << BTRFS_STRIPE_LEN_SHIFT;
if (i / sub_stripes < remaining_stripes) if (i / sub_stripes < remaining_stripes)
stripes[i].length += map->stripe_len; stripes[i].length += BTRFS_STRIPE_LEN;
/* /*
* Special for the first stripe and * Special for the first stripe and
@ -6304,22 +6301,32 @@ static u64 btrfs_max_io_len(struct map_lookup *map, enum btrfs_map_op op,
u64 offset, u64 *stripe_nr, u64 *stripe_offset, u64 offset, u64 *stripe_nr, u64 *stripe_offset,
u64 *full_stripe_start) u64 *full_stripe_start)
{ {
u32 stripe_len = map->stripe_len;
ASSERT(op != BTRFS_MAP_DISCARD); ASSERT(op != BTRFS_MAP_DISCARD);
/* /*
* Stripe_nr is the stripe where this block falls. stripe_offset is * Stripe_nr is the stripe where this block falls. stripe_offset is
* the offset of this block in its stripe. * the offset of this block in its stripe.
*/ */
*stripe_nr = div64_u64_rem(offset, stripe_len, stripe_offset); *stripe_offset = offset & BTRFS_STRIPE_LEN_MASK;
*stripe_nr = offset >> BTRFS_STRIPE_LEN_SHIFT;
ASSERT(*stripe_offset < U32_MAX); ASSERT(*stripe_offset < U32_MAX);
if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) { if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
unsigned long full_stripe_len = stripe_len * nr_data_stripes(map); unsigned long full_stripe_len = nr_data_stripes(map) <<
BTRFS_STRIPE_LEN_SHIFT;
/*
* For full stripe start, we use previously calculated
* @stripe_nr. Align it to nr_data_stripes, then multiply with
* STRIPE_LEN.
*
* By this we can avoid u64 division completely. And we have
* to go rounddown(), not round_down(), as nr_data_stripes is
* not ensured to be power of 2.
*/
*full_stripe_start = *full_stripe_start =
div64_u64(offset, full_stripe_len) * full_stripe_len; rounddown(*stripe_nr, nr_data_stripes(map)) <<
BTRFS_STRIPE_LEN_SHIFT;
/* /*
* For writes to RAID56, allow to write a full stripe set, but * For writes to RAID56, allow to write a full stripe set, but
@ -6334,7 +6341,7 @@ static u64 btrfs_max_io_len(struct map_lookup *map, enum btrfs_map_op op,
* a single disk). * a single disk).
*/ */
if (map->type & BTRFS_BLOCK_GROUP_STRIPE_MASK) if (map->type & BTRFS_BLOCK_GROUP_STRIPE_MASK)
return stripe_len - *stripe_offset; return BTRFS_STRIPE_LEN - *stripe_offset;
return U64_MAX; return U64_MAX;
} }
@ -6343,7 +6350,7 @@ static void set_io_stripe(struct btrfs_io_stripe *dst, const struct map_lookup *
{ {
dst->dev = map->stripes[stripe_index].dev; dst->dev = map->stripes[stripe_index].dev;
dst->physical = map->stripes[stripe_index].physical + dst->physical = map->stripes[stripe_index].physical +
stripe_offset + stripe_nr * map->stripe_len; stripe_offset + (stripe_nr << BTRFS_STRIPE_LEN_SHIFT);
} }
int __btrfs_map_block(struct btrfs_fs_info *fs_info, enum btrfs_map_op op, int __btrfs_map_block(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
@ -6357,7 +6364,6 @@ int __btrfs_map_block(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
u64 map_offset; u64 map_offset;
u64 stripe_offset; u64 stripe_offset;
u64 stripe_nr; u64 stripe_nr;
u64 stripe_len;
u32 stripe_index; u32 stripe_index;
int data_stripes; int data_stripes;
int i; int i;
@ -6384,7 +6390,6 @@ int __btrfs_map_block(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
map = em->map_lookup; map = em->map_lookup;
data_stripes = nr_data_stripes(map); data_stripes = nr_data_stripes(map);
stripe_len = map->stripe_len;
map_offset = logical - em->start; map_offset = logical - em->start;
max_len = btrfs_max_io_len(map, op, map_offset, &stripe_nr, max_len = btrfs_max_io_len(map, op, map_offset, &stripe_nr,
@ -6460,11 +6465,10 @@ int __btrfs_map_block(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
} }
} else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) { } else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
ASSERT(map->stripe_len == BTRFS_STRIPE_LEN);
if (need_raid_map && (need_full_stripe(op) || mirror_num > 1)) { if (need_raid_map && (need_full_stripe(op) || mirror_num > 1)) {
/* push stripe_nr back to the start of the full stripe */ /* push stripe_nr back to the start of the full stripe */
stripe_nr = div64_u64(raid56_full_stripe_start, stripe_nr = div64_u64(raid56_full_stripe_start,
stripe_len * data_stripes); data_stripes << BTRFS_STRIPE_LEN_SHIFT);
/* RAID[56] write or recovery. Return all stripes */ /* RAID[56] write or recovery. Return all stripes */
num_stripes = map->num_stripes; num_stripes = map->num_stripes;
@ -6473,7 +6477,7 @@ int __btrfs_map_block(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
/* Return the length to the full stripe end */ /* Return the length to the full stripe end */
*length = min(logical + *length, *length = min(logical + *length,
raid56_full_stripe_start + em->start + raid56_full_stripe_start + em->start +
data_stripes * stripe_len) - logical; (data_stripes << BTRFS_STRIPE_LEN_SHIFT)) - logical;
stripe_index = 0; stripe_index = 0;
stripe_offset = 0; stripe_offset = 0;
} else { } else {
@ -6568,7 +6572,7 @@ int __btrfs_map_block(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
tmp = stripe_nr * data_stripes; tmp = stripe_nr * data_stripes;
for (i = 0; i < data_stripes; i++) for (i = 0; i < data_stripes; i++)
bioc->raid_map[(i + rot) % num_stripes] = bioc->raid_map[(i + rot) % num_stripes] =
em->start + (tmp + i) * map->stripe_len; em->start + ((tmp + i) << BTRFS_STRIPE_LEN_SHIFT);
bioc->raid_map[(i + rot) % map->num_stripes] = RAID5_P_STRIPE; bioc->raid_map[(i + rot) % map->num_stripes] = RAID5_P_STRIPE;
if (map->type & BTRFS_BLOCK_GROUP_RAID6) if (map->type & BTRFS_BLOCK_GROUP_RAID6)
@ -6941,7 +6945,6 @@ static int read_one_chunk(struct btrfs_key *key, struct extent_buffer *leaf,
map->num_stripes = num_stripes; map->num_stripes = num_stripes;
map->io_width = btrfs_chunk_io_width(leaf, chunk); map->io_width = btrfs_chunk_io_width(leaf, chunk);
map->io_align = btrfs_chunk_io_align(leaf, chunk); map->io_align = btrfs_chunk_io_align(leaf, chunk);
map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
map->type = type; map->type = type;
/* /*
* We can't use the sub_stripes value, as for profiles other than * We can't use the sub_stripes value, as for profiles other than

View File

@ -17,7 +17,11 @@
extern struct mutex uuid_mutex; extern struct mutex uuid_mutex;
#define BTRFS_STRIPE_LEN SZ_64K #define BTRFS_STRIPE_LEN SZ_64K
#define BTRFS_STRIPE_LEN_SHIFT (16)
#define BTRFS_STRIPE_LEN_MASK (BTRFS_STRIPE_LEN - 1)
static_assert(const_ilog2(BTRFS_STRIPE_LEN) == BTRFS_STRIPE_LEN_SHIFT);
/* Used by sanity check for btrfs_raid_types. */ /* Used by sanity check for btrfs_raid_types. */
#define const_ffs(n) (__builtin_ctzll(n) + 1) #define const_ffs(n) (__builtin_ctzll(n) + 1)
@ -446,7 +450,6 @@ struct map_lookup {
u64 type; u64 type;
int io_align; int io_align;
int io_width; int io_width;
u32 stripe_len;
int num_stripes; int num_stripes;
int sub_stripes; int sub_stripes;
int verified_stripes; /* For mount time dev extent verification */ int verified_stripes; /* For mount time dev extent verification */