xfs: rework splitting of indirect block reservations

Move the check of whether we have enough indirect blocks, and the
stealing of the deleted extent's blocks, out of xfs_bmap_split_indlen
and into the caller to prepare for handling delayed allocation of RT
extents that can't easily be stolen.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: "Darrick J. Wong" <djwong@kernel.org>
Signed-off-by: Chandan Babu R <chandanbabu@kernel.org>
This commit is contained in:
Christoph Hellwig 2024-04-22 13:20:17 +02:00 committed by Chandan Babu R
parent 727f843163
commit da2b9c3a8d

View File

@ -4831,31 +4831,17 @@ error0:
* ores == 1). The number of stolen blocks is returned. The availability and
* subsequent accounting of stolen blocks is the responsibility of the caller.
*/
static xfs_filblks_t
static void
xfs_bmap_split_indlen(
xfs_filblks_t ores, /* original res. */
xfs_filblks_t *indlen1, /* ext1 worst indlen */
xfs_filblks_t *indlen2, /* ext2 worst indlen */
xfs_filblks_t avail) /* stealable blocks */
xfs_filblks_t *indlen2) /* ext2 worst indlen */
{
xfs_filblks_t len1 = *indlen1;
xfs_filblks_t len2 = *indlen2;
xfs_filblks_t nres = len1 + len2; /* new total res. */
xfs_filblks_t stolen = 0;
xfs_filblks_t resfactor;
/*
* Steal as many blocks as we can to try and satisfy the worst case
* indlen for both new extents.
*/
if (ores < nres && avail)
stolen = XFS_FILBLKS_MIN(nres - ores, avail);
ores += stolen;
/* nothing else to do if we've satisfied the new reservation */
if (ores >= nres)
return stolen;
/*
* We can't meet the total required reservation for the two extents.
* Calculate the percent of the overall shortage between both extents
@ -4900,8 +4886,6 @@ xfs_bmap_split_indlen(
*indlen1 = len1;
*indlen2 = len2;
return stolen;
}
int
@ -4917,7 +4901,7 @@ xfs_bmap_del_extent_delay(
struct xfs_bmbt_irec new;
int64_t da_old, da_new, da_diff = 0;
xfs_fileoff_t del_endoff, got_endoff;
xfs_filblks_t got_indlen, new_indlen, stolen;
xfs_filblks_t got_indlen, new_indlen, stolen = 0;
uint32_t state = xfs_bmap_fork_to_state(whichfork);
uint64_t fdblocks;
int error = 0;
@ -4996,8 +4980,19 @@ xfs_bmap_del_extent_delay(
new_indlen = xfs_bmap_worst_indlen(ip, new.br_blockcount);
WARN_ON_ONCE(!got_indlen || !new_indlen);
stolen = xfs_bmap_split_indlen(da_old, &got_indlen, &new_indlen,
del->br_blockcount);
/*
* Steal as many blocks as we can to try and satisfy the worst
* case indlen for both new extents.
*/
da_new = got_indlen + new_indlen;
if (da_new > da_old) {
stolen = XFS_FILBLKS_MIN(da_new - da_old,
del->br_blockcount);
da_old += stolen;
}
if (da_new > da_old)
xfs_bmap_split_indlen(da_old, &got_indlen, &new_indlen);
da_new = got_indlen + new_indlen;
got->br_startblock = nullstartblock((int)got_indlen);
@ -5009,7 +5004,6 @@ xfs_bmap_del_extent_delay(
xfs_iext_next(ifp, icur);
xfs_iext_insert(ip, icur, &new, state);
da_new = got_indlen + new_indlen - stolen;
del->br_blockcount -= stolen;
break;
}