xfs: xfs_quota_unreserve_blkres can't fail

Unreserving quotas can't fail due to quota limits, and we'll notice a
shut down file system a bit later in all the callers anyway.  Return
void and remove the error checking and propagation in the callers.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: "Darrick J. Wong" <djwong@kernel.org>
Signed-off-by: Chandan Babu R <chandanbabu@kernel.org>
commit cc3c92e7e7 (parent f7b9ee7845)
Author:    Christoph Hellwig
Date:      2024-04-30 14:58:22 +02:00
Committer: Chandan Babu R

8 changed files with 20 additions and 37 deletions
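The reasoning behind the change is easy to model outside the kernel: a reservation that counts blocks against a limit can fail, but handing blocks back cannot. Below is a minimal userspace sketch of that pattern; the struct and function names are illustrative stand-ins, not the XFS API.

/* Minimal sketch of why unreserving cannot fail; all names here are
 * illustrative, not the kernel's. */
#include <stdint.h>
#include <stdio.h>

struct quota {
        int64_t hard_limit;     /* maximum reservable blocks */
        int64_t reserved;       /* currently reserved blocks */
};

/* Reserving can hit the limit, so it must be able to return an error. */
static int quota_reserve(struct quota *q, int64_t blocks)
{
        if (q->reserved + blocks > q->hard_limit)
                return -1;      /* stands in for -EDQUOT */
        q->reserved += blocks;
        return 0;
}

/* Unreserving only gives blocks back, so no limit check applies and
 * nothing can fail; a void return makes the callers' error
 * propagation dead code. */
static void quota_unreserve(struct quota *q, uint64_t blocks)
{
        q->reserved -= (int64_t)blocks;
}

int main(void)
{
        struct quota q = { .hard_limit = 100, .reserved = 0 };

        if (quota_reserve(&q, 80))
                return 1;
        quota_unreserve(&q, 80);        /* no error to check */
        printf("reserved: %lld\n", (long long)q.reserved);
        return 0;
}

Compiled with any C compiler, this prints "reserved: 0"; the point is simply that the unreserve path has no failure branch to propagate.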

diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c

@@ -4928,7 +4928,7 @@ xfs_bmap_split_indlen(
 	*indlen2 = len2;
 }
 
-int
+void
 xfs_bmap_del_extent_delay(
 	struct xfs_inode	*ip,
 	int			whichfork,
@@ -4944,7 +4944,6 @@ xfs_bmap_del_extent_delay(
 	xfs_filblks_t		got_indlen, new_indlen, stolen = 0;
 	uint32_t		state = xfs_bmap_fork_to_state(whichfork);
 	uint64_t		fdblocks;
-	int			error = 0;
 	bool			isrt;
 
 	XFS_STATS_INC(mp, xs_del_exlist);
@@ -4964,9 +4963,7 @@ xfs_bmap_del_extent_delay(
 	 * sb counters as we might have to borrow some blocks for the
 	 * indirect block accounting.
 	 */
-	error = xfs_quota_unreserve_blkres(ip, del->br_blockcount);
-	if (error)
-		return error;
+	xfs_quota_unreserve_blkres(ip, del->br_blockcount);
 	ip->i_delayed_blks -= del->br_blockcount;
 
 	if (got->br_startoff == del->br_startoff)
@@ -5064,7 +5061,6 @@ xfs_bmap_del_extent_delay(
 
 	xfs_add_fdblocks(mp, fdblocks);
 	xfs_mod_delalloc(ip, -(int64_t)del->br_blockcount, -da_diff);
-	return error;
 }
 
 void
@@ -5622,18 +5618,16 @@ __xfs_bunmapi(
 
 delete:
 		if (wasdel) {
-			error = xfs_bmap_del_extent_delay(ip, whichfork, &icur,
-					&got, &del);
+			xfs_bmap_del_extent_delay(ip, whichfork, &icur, &got, &del);
 		} else {
 			error = xfs_bmap_del_extent_real(ip, tp, &icur, cur,
 					&del, &tmp_logflags, whichfork,
 					flags);
 			logflags |= tmp_logflags;
+			if (error)
+				goto error0;
 		}
 
-		if (error)
-			goto error0;
-
 		end = del.br_startoff - 1;
 nodelete:
 		/*

diff --git a/fs/xfs/libxfs/xfs_bmap.h b/fs/xfs/libxfs/xfs_bmap.h

@@ -202,7 +202,7 @@ int xfs_bmapi_write(struct xfs_trans *tp, struct xfs_inode *ip,
 int	xfs_bunmapi(struct xfs_trans *tp, struct xfs_inode *ip,
 		xfs_fileoff_t bno, xfs_filblks_t len, uint32_t flags,
 		xfs_extnum_t nexts, int *done);
-int	xfs_bmap_del_extent_delay(struct xfs_inode *ip, int whichfork,
+void	xfs_bmap_del_extent_delay(struct xfs_inode *ip, int whichfork,
 		struct xfs_iext_cursor *cur, struct xfs_bmbt_irec *got,
 		struct xfs_bmbt_irec *del);
 void	xfs_bmap_del_extent_cow(struct xfs_inode *ip,

diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c

@@ -443,7 +443,6 @@ xfs_discard_folio(
 {
 	struct xfs_inode *ip = XFS_I(folio->mapping->host);
 	struct xfs_mount *mp = ip->i_mount;
-	int		error;
 
 	if (xfs_is_shutdown(mp))
 		return;
@@ -457,11 +456,8 @@ xfs_discard_folio(
 	 * byte of the next folio. Hence the end offset is only dependent on the
 	 * folio itself and not the start offset that is passed in.
 	 */
-	error = xfs_bmap_punch_delalloc_range(ip, pos,
+	xfs_bmap_punch_delalloc_range(ip, pos,
 			folio_pos(folio) + folio_size(folio));
-
-	if (error && !xfs_is_shutdown(mp))
-		xfs_alert(mp, "page discard unable to remove delalloc mapping.");
 }
 
 static const struct iomap_writeback_ops xfs_writeback_ops = {

diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c

@@ -440,7 +440,7 @@ out_unlock_iolock:
  * if the ranges only partially overlap them, so it is up to the caller to
  * ensure that partial blocks are not passed in.
  */
-int
+void
 xfs_bmap_punch_delalloc_range(
 	struct xfs_inode	*ip,
 	xfs_off_t		start_byte,
@@ -452,7 +452,6 @@ xfs_bmap_punch_delalloc_range(
 	xfs_fileoff_t		end_fsb = XFS_B_TO_FSB(mp, end_byte);
 	struct xfs_bmbt_irec	got, del;
 	struct xfs_iext_cursor	icur;
-	int			error = 0;
 
 	ASSERT(!xfs_need_iread_extents(ifp));
 
@@ -476,15 +475,13 @@ xfs_bmap_punch_delalloc_range(
 			continue;
 		}
 
-		error = xfs_bmap_del_extent_delay(ip, XFS_DATA_FORK, &icur,
-				&got, &del);
-		if (error || !xfs_iext_get_extent(ifp, &icur, &got))
+		xfs_bmap_del_extent_delay(ip, XFS_DATA_FORK, &icur, &got, &del);
+		if (!xfs_iext_get_extent(ifp, &icur, &got))
 			break;
 	}
 
 out_unlock:
 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
-	return error;
 }
 
 /*

diff --git a/fs/xfs/xfs_bmap_util.h b/fs/xfs/xfs_bmap_util.h

@@ -30,7 +30,7 @@ xfs_bmap_rtalloc(struct xfs_bmalloca *ap)
 }
 #endif /* CONFIG_XFS_RT */
 
-int	xfs_bmap_punch_delalloc_range(struct xfs_inode *ip,
+void	xfs_bmap_punch_delalloc_range(struct xfs_inode *ip,
 		xfs_off_t start_byte, xfs_off_t end_byte);
 
 struct kgetbmap {

diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c

@@ -1232,8 +1232,8 @@ xfs_buffered_write_delalloc_punch(
 	loff_t			offset,
 	loff_t			length)
 {
-	return xfs_bmap_punch_delalloc_range(XFS_I(inode), offset,
-			offset + length);
+	xfs_bmap_punch_delalloc_range(XFS_I(inode), offset, offset + length);
+	return 0;
 }
 
 static int

diff --git a/fs/xfs/xfs_quota.h b/fs/xfs/xfs_quota.h

@@ -215,10 +215,11 @@ xfs_quota_reserve_blkres(struct xfs_inode *ip, int64_t blocks)
 	return xfs_trans_reserve_quota_nblks(NULL, ip, blocks, 0, false);
 }
 
-static inline int
-xfs_quota_unreserve_blkres(struct xfs_inode *ip, int64_t blocks)
+static inline void
+xfs_quota_unreserve_blkres(struct xfs_inode *ip, uint64_t blocks)
 {
-	return xfs_quota_reserve_blkres(ip, -blocks);
+	/* don't return an error as unreserving quotas can't fail */
+	xfs_quota_reserve_blkres(ip, -(int64_t)blocks);
 }
 
 extern int xfs_mount_reset_sbqflags(struct xfs_mount *);

diff --git a/fs/xfs/xfs_reflink.c b/fs/xfs/xfs_reflink.c

@@ -592,10 +592,8 @@ xfs_reflink_cancel_cow_blocks(
 		trace_xfs_reflink_cancel_cow(ip, &del);
 
 		if (isnullstartblock(del.br_startblock)) {
-			error = xfs_bmap_del_extent_delay(ip, XFS_COW_FORK,
-					&icur, &got, &del);
-			if (error)
-				break;
+			xfs_bmap_del_extent_delay(ip, XFS_COW_FORK, &icur, &got,
+					&del);
 		} else if (del.br_state == XFS_EXT_UNWRITTEN || cancel_real) {
 			ASSERT((*tpp)->t_highest_agno == NULLAGNUMBER);
 
@@ -618,10 +616,7 @@ xfs_reflink_cancel_cow_blocks(
 			xfs_bmap_del_extent_cow(ip, &icur, &got, &del);
 
 			/* Remove the quota reservation */
-			error = xfs_quota_unreserve_blkres(ip,
-					del.br_blockcount);
-			if (error)
-				break;
+			xfs_quota_unreserve_blkres(ip, del.br_blockcount);
 		} else {
 			/* Didn't do anything, push cursor back. */
 			xfs_iext_prev(ifp, &icur);