Merge tag 'xfs-4.15-fixes-8' of git://git.kernel.org/pub/scm/fs/xfs/xfs-linux

Pull xfs fixes from Darrick Wong:
 "Here are some XFS fixes for 4.15-rc5. Apologies for the unusually
  large number of patches this late, but I wanted to make sure the
  corruption fixes were really ready to go.

  Changes since last update:

   - Fix a locking problem during xattr block conversion that could
     lead the log checkpointing thread to try to write an incomplete
     buffer to disk, which leads to a corruption shutdown

   - Fix a null pointer dereference when removing delayed allocation
     extents

   - Remove post-EOF speculative allocations when reflinking a block
     past the current inode size so that we don't just leave them there
     and assert on inode reclaim

   - Relax an assert which didn't accurately reflect the way locking
     works and would trigger under heavy I/O load

   - Avoid an infinite loop when cancelling copy-on-write extents after
     a writeback failure

   - Try to avoid copy-on-write transaction reservation overflows when
     remapping after a successful write

   - Fix various problems with the copy-on-write reservation automatic
     garbage collection not being cleaned up properly during a ro
     remount

   - Fix problems with rmap log items being processed in the wrong
     order, leading to corruption shutdowns

   - Fix problems with EFI recovery wherein the "remove any rmapping if
     present" mechanism wasn't actually doing anything, which would
     lead to corruption problems later when the extent is reallocated,
     leading to multiple rmaps for the same extent"

* tag 'xfs-4.15-fixes-8' of git://git.kernel.org/pub/scm/fs/xfs/xfs-linux:
  xfs: only skip rmap owner checks for unknown-owner rmap removal
  xfs: always honor OWN_UNKNOWN rmap removal requests
  xfs: queue deferred rmap ops for cow staging extent alloc/free in the right order
  xfs: set cowblocks tag for direct cow writes too
  xfs: remove leftover CoW reservations when remounting ro
  xfs: don't be so eager to clear the cowblocks tag on truncate
  xfs: track cowblocks separately in i_flags
  xfs: allow CoW remap transactions to use reserve blocks
  xfs: avoid infinite loop when cancelling CoW blocks after writeback failure
  xfs: relax is_reflink_inode assert in xfs_reflink_find_cow_mapping
  xfs: remove dest file's post-eof preallocations before reflinking
  xfs: move xfs_iext_insert tracepoint to report useful information
  xfs: account for null transactions in bunmapi
  xfs: hold xfs_buf locked between shortform->leaf conversion and the addition of an attribute
  xfs: add the ability to join a held buffer to a defer_ops
commit fca0e39b2b
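The xattr fix depends on one pattern that is easier to see in isolation than spread over the hunks below: a buffer produced in one transaction must stay locked while the deferred ops roll that transaction, so the caller holds it and joins it to the defer_ops, then rejoins it to the rolled transaction. What follows is only a condensed sketch of that flow, pieced together from the xfs_attr_set() hunks in this diff, with declarations and most error handling omitted; it is not the literal patch.

	/* Sketch only: condensed from the xfs_attr_set() hunks below. */
	error = xfs_attr_shortform_to_leaf(&args, &leaf_bp);	/* leaf_bp comes back locked */
	if (error)
		goto out_defer_cancel;
	/* Keep the half-built leaf away from AIL pushes until it is complete. */
	xfs_trans_bhold(args.trans, leaf_bp);		/* hold across the commit */
	xfs_defer_bjoin(args.dfops, leaf_bp);		/* relogged on every roll */
	xfs_defer_ijoin(args.dfops, dp);
	error = xfs_defer_finish(&args.trans, args.dfops);
	/* ... */
	/* After the roll, the new transaction takes the buffer back. */
	error = xfs_trans_roll_inode(&args.trans, dp);
	xfs_trans_bjoin(args.trans, leaf_bp);
	leaf_bp = NULL;

Inside xfs_defer_trans_roll(), as the xfs_defer.c hunks show, the bjoin'd buffers are dirtied before the roll and then rejoined and held again afterwards, which is what keeps the lock from being dropped mid-sequence.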
@@ -702,7 +702,7 @@ xfs_alloc_ag_vextent(
 	ASSERT(args->agbno % args->alignment == 0);
 
 	/* if not file data, insert new block into the reverse map btree */
-	if (args->oinfo.oi_owner != XFS_RMAP_OWN_UNKNOWN) {
+	if (!xfs_rmap_should_skip_owner_update(&args->oinfo)) {
 		error = xfs_rmap_alloc(args->tp, args->agbp, args->agno,
 				args->agbno, args->len, &args->oinfo);
 		if (error)
@@ -1682,7 +1682,7 @@ xfs_free_ag_extent(
 	bno_cur = cnt_cur = NULL;
 	mp = tp->t_mountp;
 
-	if (oinfo->oi_owner != XFS_RMAP_OWN_UNKNOWN) {
+	if (!xfs_rmap_should_skip_owner_update(oinfo)) {
 		error = xfs_rmap_free(tp, agbp, agno, bno, len, oinfo);
 		if (error)
 			goto error0;
@@ -212,6 +212,7 @@ xfs_attr_set(
 	int			flags)
 {
 	struct xfs_mount	*mp = dp->i_mount;
+	struct xfs_buf		*leaf_bp = NULL;
 	struct xfs_da_args	args;
 	struct xfs_defer_ops	dfops;
 	struct xfs_trans_res	tres;
@@ -327,9 +328,16 @@ xfs_attr_set(
 		 * GROT: another possible req'mt for a double-split btree op.
 		 */
 		xfs_defer_init(args.dfops, args.firstblock);
-		error = xfs_attr_shortform_to_leaf(&args);
+		error = xfs_attr_shortform_to_leaf(&args, &leaf_bp);
 		if (error)
 			goto out_defer_cancel;
+		/*
+		 * Prevent the leaf buffer from being unlocked so that a
+		 * concurrent AIL push cannot grab the half-baked leaf
+		 * buffer and run into problems with the write verifier.
+		 */
+		xfs_trans_bhold(args.trans, leaf_bp);
+		xfs_defer_bjoin(args.dfops, leaf_bp);
 		xfs_defer_ijoin(args.dfops, dp);
 		error = xfs_defer_finish(&args.trans, args.dfops);
 		if (error)
@@ -337,13 +345,14 @@ xfs_attr_set(
 
 		/*
 		 * Commit the leaf transformation. We'll need another (linked)
-		 * transaction to add the new attribute to the leaf.
+		 * transaction to add the new attribute to the leaf, which
+		 * means that we have to hold & join the leaf buffer here too.
 		 */
-
 		error = xfs_trans_roll_inode(&args.trans, dp);
 		if (error)
 			goto out;
-
+		xfs_trans_bjoin(args.trans, leaf_bp);
+		leaf_bp = NULL;
 	}
 
 	if (xfs_bmap_one_block(dp, XFS_ATTR_FORK))
@@ -374,8 +383,9 @@ xfs_attr_set(
 
 out_defer_cancel:
 	xfs_defer_cancel(&dfops);
-	args.trans = NULL;
 out:
+	if (leaf_bp)
+		xfs_trans_brelse(args.trans, leaf_bp);
 	if (args.trans)
 		xfs_trans_cancel(args.trans);
 	xfs_iunlock(dp, XFS_ILOCK_EXCL);
@@ -735,10 +735,13 @@ xfs_attr_shortform_getvalue(xfs_da_args_t *args)
 }
 
 /*
- * Convert from using the shortform to the leaf.
+ * Convert from using the shortform to the leaf. On success, return the
+ * buffer so that we can keep it locked until we're totally done with it.
  */
 int
-xfs_attr_shortform_to_leaf(xfs_da_args_t *args)
+xfs_attr_shortform_to_leaf(
+	struct xfs_da_args	*args,
+	struct xfs_buf		**leaf_bp)
 {
 	xfs_inode_t		*dp;
 	xfs_attr_shortform_t	*sf;
@@ -818,7 +821,7 @@ xfs_attr_shortform_to_leaf(xfs_da_args_t *args)
 		sfe = XFS_ATTR_SF_NEXTENTRY(sfe);
 	}
 	error = 0;
-
+	*leaf_bp = bp;
 out:
 	kmem_free(tmpbuffer);
 	return error;
@@ -48,7 +48,8 @@ void xfs_attr_shortform_create(struct xfs_da_args *args);
 void	xfs_attr_shortform_add(struct xfs_da_args *args, int forkoff);
 int	xfs_attr_shortform_lookup(struct xfs_da_args *args);
 int	xfs_attr_shortform_getvalue(struct xfs_da_args *args);
-int	xfs_attr_shortform_to_leaf(struct xfs_da_args *args);
+int	xfs_attr_shortform_to_leaf(struct xfs_da_args *args,
+			struct xfs_buf **leaf_bp);
 int	xfs_attr_shortform_remove(struct xfs_da_args *args);
 int	xfs_attr_shortform_allfit(struct xfs_buf *bp, struct xfs_inode *dp);
 int	xfs_attr_shortform_bytesfit(struct xfs_inode *dp, int bytes);
@@ -5136,7 +5136,7 @@ __xfs_bunmapi(
 	 * blowing out the transaction with a mix of EFIs and reflink
 	 * adjustments.
 	 */
-	if (xfs_is_reflink_inode(ip) && whichfork == XFS_DATA_FORK)
+	if (tp && xfs_is_reflink_inode(ip) && whichfork == XFS_DATA_FORK)
 		max_len = min(len, xfs_refcount_max_unmap(tp->t_log_res));
 	else
 		max_len = len;
@@ -249,6 +249,10 @@ xfs_defer_trans_roll(
 	for (i = 0; i < XFS_DEFER_OPS_NR_INODES && dop->dop_inodes[i]; i++)
 		xfs_trans_log_inode(*tp, dop->dop_inodes[i], XFS_ILOG_CORE);
 
+	/* Hold the (previously bjoin'd) buffer locked across the roll. */
+	for (i = 0; i < XFS_DEFER_OPS_NR_BUFS && dop->dop_bufs[i]; i++)
+		xfs_trans_dirty_buf(*tp, dop->dop_bufs[i]);
+
 	trace_xfs_defer_trans_roll((*tp)->t_mountp, dop);
 
 	/* Roll the transaction. */
@@ -264,6 +268,12 @@ xfs_defer_trans_roll(
 	for (i = 0; i < XFS_DEFER_OPS_NR_INODES && dop->dop_inodes[i]; i++)
 		xfs_trans_ijoin(*tp, dop->dop_inodes[i], 0);
 
+	/* Rejoin the buffers and dirty them so the log moves forward. */
+	for (i = 0; i < XFS_DEFER_OPS_NR_BUFS && dop->dop_bufs[i]; i++) {
+		xfs_trans_bjoin(*tp, dop->dop_bufs[i]);
+		xfs_trans_bhold(*tp, dop->dop_bufs[i]);
+	}
+
 	return error;
 }
 
@@ -295,6 +305,31 @@ xfs_defer_ijoin(
 		}
 	}
 
 	ASSERT(0);
 	return -EFSCORRUPTED;
 }
+
+/*
+ * Add this buffer to the deferred op. Each joined buffer is relogged
+ * each time we roll the transaction.
+ */
+int
+xfs_defer_bjoin(
+	struct xfs_defer_ops		*dop,
+	struct xfs_buf			*bp)
+{
+	int				i;
+
+	for (i = 0; i < XFS_DEFER_OPS_NR_BUFS; i++) {
+		if (dop->dop_bufs[i] == bp)
+			return 0;
+		else if (dop->dop_bufs[i] == NULL) {
+			dop->dop_bufs[i] = bp;
+			return 0;
+		}
+	}
+
+	ASSERT(0);
+	return -EFSCORRUPTED;
+}
+
@@ -493,9 +528,7 @@ xfs_defer_init(
 	struct xfs_defer_ops		*dop,
 	xfs_fsblock_t			*fbp)
 {
-	dop->dop_committed = false;
-	dop->dop_low = false;
-	memset(&dop->dop_inodes, 0, sizeof(dop->dop_inodes));
+	memset(dop, 0, sizeof(struct xfs_defer_ops));
 	*fbp = NULLFSBLOCK;
 	INIT_LIST_HEAD(&dop->dop_intake);
 	INIT_LIST_HEAD(&dop->dop_pending);
@@ -59,6 +59,7 @@ enum xfs_defer_ops_type {
 };
 
 #define XFS_DEFER_OPS_NR_INODES	2	/* join up to two inodes */
+#define XFS_DEFER_OPS_NR_BUFS	2	/* join up to two buffers */
 
 struct xfs_defer_ops {
 	bool			dop_committed;	/* did any trans commit? */
@@ -66,8 +67,9 @@ struct xfs_defer_ops {
 	struct list_head	dop_intake;	/* unlogged pending work */
 	struct list_head	dop_pending;	/* logged pending work */
 
-	/* relog these inodes with each roll */
+	/* relog these with each roll */
 	struct xfs_inode	*dop_inodes[XFS_DEFER_OPS_NR_INODES];
+	struct xfs_buf		*dop_bufs[XFS_DEFER_OPS_NR_BUFS];
 };
 
 void xfs_defer_add(struct xfs_defer_ops *dop, enum xfs_defer_ops_type type,
@@ -77,6 +79,7 @@ void xfs_defer_cancel(struct xfs_defer_ops *dop);
 void xfs_defer_init(struct xfs_defer_ops *dop, xfs_fsblock_t *fbp);
 bool xfs_defer_has_unfinished_work(struct xfs_defer_ops *dop);
 int xfs_defer_ijoin(struct xfs_defer_ops *dop, struct xfs_inode *ip);
+int xfs_defer_bjoin(struct xfs_defer_ops *dop, struct xfs_buf *bp);
 
 /* Description of a deferred type. */
 struct xfs_defer_op_type {
@@ -632,8 +632,6 @@ xfs_iext_insert(
 	struct xfs_iext_leaf	*new = NULL;
 	int			nr_entries, i;
 
-	trace_xfs_iext_insert(ip, cur, state, _RET_IP_);
-
 	if (ifp->if_height == 0)
 		xfs_iext_alloc_root(ifp, cur);
 	else if (ifp->if_height == 1)
@@ -661,6 +659,8 @@ xfs_iext_insert(
 	xfs_iext_set(cur_rec(cur), irec);
 	ifp->if_bytes += sizeof(struct xfs_iext_rec);
 
+	trace_xfs_iext_insert(ip, cur, state, _RET_IP_);
+
 	if (new)
 		xfs_iext_insert_node(ifp, xfs_iext_leaf_key(new, 0), new, 2);
 }
@@ -1488,27 +1488,12 @@ __xfs_refcount_cow_alloc(
 	xfs_extlen_t		aglen,
 	struct xfs_defer_ops	*dfops)
 {
-	int			error;
-
 	trace_xfs_refcount_cow_increase(rcur->bc_mp, rcur->bc_private.a.agno,
 			agbno, aglen);
 
 	/* Add refcount btree reservation */
-	error = xfs_refcount_adjust_cow(rcur, agbno, aglen,
+	return xfs_refcount_adjust_cow(rcur, agbno, aglen,
 			XFS_REFCOUNT_ADJUST_COW_ALLOC, dfops);
-	if (error)
-		return error;
-
-	/* Add rmap entry */
-	if (xfs_sb_version_hasrmapbt(&rcur->bc_mp->m_sb)) {
-		error = xfs_rmap_alloc_extent(rcur->bc_mp, dfops,
-				rcur->bc_private.a.agno,
-				agbno, aglen, XFS_RMAP_OWN_COW);
-		if (error)
-			return error;
-	}
-
-	return error;
 }
 
 /*
@@ -1521,27 +1506,12 @@ __xfs_refcount_cow_free(
 	xfs_extlen_t		aglen,
 	struct xfs_defer_ops	*dfops)
 {
-	int			error;
-
 	trace_xfs_refcount_cow_decrease(rcur->bc_mp, rcur->bc_private.a.agno,
 			agbno, aglen);
 
 	/* Remove refcount btree reservation */
-	error = xfs_refcount_adjust_cow(rcur, agbno, aglen,
+	return xfs_refcount_adjust_cow(rcur, agbno, aglen,
 			XFS_REFCOUNT_ADJUST_COW_FREE, dfops);
-	if (error)
-		return error;
-
-	/* Remove rmap entry */
-	if (xfs_sb_version_hasrmapbt(&rcur->bc_mp->m_sb)) {
-		error = xfs_rmap_free_extent(rcur->bc_mp, dfops,
-				rcur->bc_private.a.agno,
-				agbno, aglen, XFS_RMAP_OWN_COW);
-		if (error)
-			return error;
-	}
-
-	return error;
 }
 
 /* Record a CoW staging extent in the refcount btree. */
@@ -1552,11 +1522,19 @@ xfs_refcount_alloc_cow_extent(
 	xfs_fsblock_t			fsb,
 	xfs_extlen_t			len)
 {
+	int				error;
+
 	if (!xfs_sb_version_hasreflink(&mp->m_sb))
 		return 0;
 
-	return __xfs_refcount_add(mp, dfops, XFS_REFCOUNT_ALLOC_COW,
+	error = __xfs_refcount_add(mp, dfops, XFS_REFCOUNT_ALLOC_COW,
 			fsb, len);
+	if (error)
+		return error;
+
+	/* Add rmap entry */
+	return xfs_rmap_alloc_extent(mp, dfops, XFS_FSB_TO_AGNO(mp, fsb),
+			XFS_FSB_TO_AGBNO(mp, fsb), len, XFS_RMAP_OWN_COW);
 }
 
 /* Forget a CoW staging event in the refcount btree. */
@@ -1567,9 +1545,17 @@ xfs_refcount_free_cow_extent(
 	xfs_fsblock_t			fsb,
 	xfs_extlen_t			len)
 {
+	int				error;
+
 	if (!xfs_sb_version_hasreflink(&mp->m_sb))
 		return 0;
 
+	/* Remove rmap entry */
+	error = xfs_rmap_free_extent(mp, dfops, XFS_FSB_TO_AGNO(mp, fsb),
+			XFS_FSB_TO_AGBNO(mp, fsb), len, XFS_RMAP_OWN_COW);
+	if (error)
+		return error;
+
 	return __xfs_refcount_add(mp, dfops, XFS_REFCOUNT_FREE_COW,
 			fsb, len);
 }
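Taken together, the four refcount hunks above move the rmap updates out of the low-level adjust helpers and queue them as deferred ops at the top level, which pins down the ordering: for a CoW allocation the refcount reservation is queued before the rmap addition, and for a CoW free the rmap removal is queued first. A rough condensation of the two top-level functions above (error handling elided; not a literal copy of either function):

	/* Allocating a CoW staging extent: refcount work first, then the rmap. */
	error = __xfs_refcount_add(mp, dfops, XFS_REFCOUNT_ALLOC_COW, fsb, len);
	error = xfs_rmap_alloc_extent(mp, dfops, XFS_FSB_TO_AGNO(mp, fsb),
			XFS_FSB_TO_AGBNO(mp, fsb), len, XFS_RMAP_OWN_COW);

	/* Freeing it again: drop the rmap first, then the refcount reservation. */
	error = xfs_rmap_free_extent(mp, dfops, XFS_FSB_TO_AGNO(mp, fsb),
			XFS_FSB_TO_AGBNO(mp, fsb), len, XFS_RMAP_OWN_COW);
	error = __xfs_refcount_add(mp, dfops, XFS_REFCOUNT_FREE_COW, fsb, len);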
@@ -367,6 +367,51 @@ xfs_rmap_lookup_le_range(
 	return error;
 }
 
+/*
+ * Perform all the relevant owner checks for a removal op. If we're doing an
+ * unknown-owner removal then we have no owner information to check.
+ */
+static int
+xfs_rmap_free_check_owner(
+	struct xfs_mount	*mp,
+	uint64_t		ltoff,
+	struct xfs_rmap_irec	*rec,
+	xfs_fsblock_t		bno,
+	xfs_filblks_t		len,
+	uint64_t		owner,
+	uint64_t		offset,
+	unsigned int		flags)
+{
+	int			error = 0;
+
+	if (owner == XFS_RMAP_OWN_UNKNOWN)
+		return 0;
+
+	/* Make sure the unwritten flag matches. */
+	XFS_WANT_CORRUPTED_GOTO(mp, (flags & XFS_RMAP_UNWRITTEN) ==
+			(rec->rm_flags & XFS_RMAP_UNWRITTEN), out);
+
+	/* Make sure the owner matches what we expect to find in the tree. */
+	XFS_WANT_CORRUPTED_GOTO(mp, owner == rec->rm_owner, out);
+
+	/* Check the offset, if necessary. */
+	if (XFS_RMAP_NON_INODE_OWNER(owner))
+		goto out;
+
+	if (flags & XFS_RMAP_BMBT_BLOCK) {
+		XFS_WANT_CORRUPTED_GOTO(mp, rec->rm_flags & XFS_RMAP_BMBT_BLOCK,
+				out);
+	} else {
+		XFS_WANT_CORRUPTED_GOTO(mp, rec->rm_offset <= offset, out);
+		XFS_WANT_CORRUPTED_GOTO(mp,
+				ltoff + rec->rm_blockcount >= offset + len,
+				out);
+	}
+
+out:
+	return error;
+}
+
 /*
  * Find the extent in the rmap btree and remove it.
  *
@@ -444,33 +489,40 @@ xfs_rmap_unmap(
 		goto out_done;
 	}
 
-	/* Make sure the unwritten flag matches. */
-	XFS_WANT_CORRUPTED_GOTO(mp, (flags & XFS_RMAP_UNWRITTEN) ==
-			(ltrec.rm_flags & XFS_RMAP_UNWRITTEN), out_error);
+	/*
+	 * If we're doing an unknown-owner removal for EFI recovery, we expect
+	 * to find the full range in the rmapbt or nothing at all. If we
+	 * don't find any rmaps overlapping either end of the range, we're
+	 * done. Hopefully this means that the EFI creator already queued
+	 * (and finished) a RUI to remove the rmap.
+	 */
+	if (owner == XFS_RMAP_OWN_UNKNOWN &&
+	    ltrec.rm_startblock + ltrec.rm_blockcount <= bno) {
+		struct xfs_rmap_irec	rtrec;
+
+		error = xfs_btree_increment(cur, 0, &i);
+		if (error)
+			goto out_error;
+		if (i == 0)
+			goto out_done;
+		error = xfs_rmap_get_rec(cur, &rtrec, &i);
+		if (error)
+			goto out_error;
+		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, out_error);
+		if (rtrec.rm_startblock >= bno + len)
+			goto out_done;
+	}
 
 	/* Make sure the extent we found covers the entire freeing range. */
 	XFS_WANT_CORRUPTED_GOTO(mp, ltrec.rm_startblock <= bno &&
-		ltrec.rm_startblock + ltrec.rm_blockcount >=
-		bno + len, out_error);
+			ltrec.rm_startblock + ltrec.rm_blockcount >=
+			bno + len, out_error);
 
-	/* Make sure the owner matches what we expect to find in the tree. */
-	XFS_WANT_CORRUPTED_GOTO(mp, owner == ltrec.rm_owner ||
-				    XFS_RMAP_NON_INODE_OWNER(owner), out_error);
-
-	/* Check the offset, if necessary. */
-	if (!XFS_RMAP_NON_INODE_OWNER(owner)) {
-		if (flags & XFS_RMAP_BMBT_BLOCK) {
-			XFS_WANT_CORRUPTED_GOTO(mp,
-					ltrec.rm_flags & XFS_RMAP_BMBT_BLOCK,
-					out_error);
-		} else {
-			XFS_WANT_CORRUPTED_GOTO(mp,
-					ltrec.rm_offset <= offset, out_error);
-			XFS_WANT_CORRUPTED_GOTO(mp,
-					ltoff + ltrec.rm_blockcount >= offset + len,
-					out_error);
-		}
-	}
+	/* Check owner information. */
+	error = xfs_rmap_free_check_owner(mp, ltoff, &ltrec, bno, len, owner,
+			offset, flags);
+	if (error)
+		goto out_error;
 
 	if (ltrec.rm_startblock == bno && ltrec.rm_blockcount == len) {
 		/* exact match, simply remove the record from rmap tree */
@@ -664,6 +716,7 @@ xfs_rmap_map(
 		flags |= XFS_RMAP_UNWRITTEN;
 	trace_xfs_rmap_map(mp, cur->bc_private.a.agno, bno, len,
 			unwritten, oinfo);
+	ASSERT(!xfs_rmap_should_skip_owner_update(oinfo));
 
 	/*
 	 * For the initial lookup, look for an exact match or the left-adjacent
@@ -61,7 +61,21 @@ static inline void
 xfs_rmap_skip_owner_update(
 	struct xfs_owner_info	*oi)
 {
-	oi->oi_owner = XFS_RMAP_OWN_UNKNOWN;
+	xfs_rmap_ag_owner(oi, XFS_RMAP_OWN_NULL);
+}
+
+static inline bool
+xfs_rmap_should_skip_owner_update(
+	struct xfs_owner_info	*oi)
+{
+	return oi->oi_owner == XFS_RMAP_OWN_NULL;
+}
+
+static inline void
+xfs_rmap_any_owner_update(
+	struct xfs_owner_info	*oi)
+{
+	xfs_rmap_ag_owner(oi, XFS_RMAP_OWN_UNKNOWN);
 }
 
 /* Reverse mapping functions. */
@@ -538,7 +538,7 @@ xfs_efi_recover(
 		return error;
 	efdp = xfs_trans_get_efd(tp, efip, efip->efi_format.efi_nextents);
 
-	xfs_rmap_skip_owner_update(&oinfo);
+	xfs_rmap_any_owner_update(&oinfo);
 	for (i = 0; i < efip->efi_format.efi_nextents; i++) {
 		extp = &efip->efi_format.efi_extents[i];
 		error = xfs_trans_free_extent(tp, efdp, extp->ext_start,
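The xfs_rmap.h helpers a few hunks up and the xfs_efi_recover() change here encode a distinction the rest of the series relies on: XFS_RMAP_OWN_NULL now means "skip the rmap update entirely" (the allocator paths test it with xfs_rmap_should_skip_owner_update()), while XFS_RMAP_OWN_UNKNOWN means "remove whatever rmap covers the extent without checking its owner". A minimal sketch of the intended usage, pieced together only from call sites visible in this diff; the ordering is illustrative, not a complete function:

	struct xfs_owner_info	oinfo;

	/* Ask for no rmap update at all when freeing this extent. */
	xfs_rmap_skip_owner_update(&oinfo);	/* owner = XFS_RMAP_OWN_NULL */

	/* EFI recovery: owner unknown, but any stale rmap must still be removed. */
	xfs_rmap_any_owner_update(&oinfo);	/* owner = XFS_RMAP_OWN_UNKNOWN */

	/* The AG free path only touches the rmapbt when an update was requested. */
	if (!xfs_rmap_should_skip_owner_update(&oinfo))
		error = xfs_rmap_free(tp, agbp, agno, bno, len, &oinfo);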
@@ -571,6 +571,11 @@ xfs_growfs_data_private(
 		 * this doesn't actually exist in the rmap btree.
 		 */
 		xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_NULL);
+		error = xfs_rmap_free(tp, bp, agno,
+				be32_to_cpu(agf->agf_length) - new,
+				new, &oinfo);
+		if (error)
+			goto error0;
 		error = xfs_free_extent(tp,
 				XFS_AGB_TO_FSB(mp, agno,
 					be32_to_cpu(agf->agf_length) - new),
@@ -870,7 +870,7 @@ xfs_eofblocks_worker(
  * based on the 'speculative_cow_prealloc_lifetime' tunable (5m by default).
  * (We'll just piggyback on the post-EOF prealloc space workqueue.)
  */
-STATIC void
+void
 xfs_queue_cowblocks(
 	struct xfs_mount *mp)
 {
@@ -1536,8 +1536,23 @@ xfs_inode_free_quota_eofblocks(
 	return __xfs_inode_free_quota_eofblocks(ip, xfs_icache_free_eofblocks);
 }
 
+static inline unsigned long
+xfs_iflag_for_tag(
+	int		tag)
+{
+	switch (tag) {
+	case XFS_ICI_EOFBLOCKS_TAG:
+		return XFS_IEOFBLOCKS;
+	case XFS_ICI_COWBLOCKS_TAG:
+		return XFS_ICOWBLOCKS;
+	default:
+		ASSERT(0);
+		return 0;
+	}
+}
+
 static void
-__xfs_inode_set_eofblocks_tag(
+__xfs_inode_set_blocks_tag(
 	xfs_inode_t	*ip,
 	void		(*execute)(struct xfs_mount *mp),
 	void		(*set_tp)(struct xfs_mount *mp, xfs_agnumber_t agno,
@@ -1552,10 +1567,10 @@ __xfs_inode_set_eofblocks_tag(
 	 * Don't bother locking the AG and looking up in the radix trees
 	 * if we already know that we have the tag set.
 	 */
-	if (ip->i_flags & XFS_IEOFBLOCKS)
+	if (ip->i_flags & xfs_iflag_for_tag(tag))
 		return;
 	spin_lock(&ip->i_flags_lock);
-	ip->i_flags |= XFS_IEOFBLOCKS;
+	ip->i_flags |= xfs_iflag_for_tag(tag);
 	spin_unlock(&ip->i_flags_lock);
 
 	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
@@ -1587,13 +1602,13 @@ xfs_inode_set_eofblocks_tag(
 	xfs_inode_t	*ip)
 {
 	trace_xfs_inode_set_eofblocks_tag(ip);
-	return __xfs_inode_set_eofblocks_tag(ip, xfs_queue_eofblocks,
+	return __xfs_inode_set_blocks_tag(ip, xfs_queue_eofblocks,
 			trace_xfs_perag_set_eofblocks,
 			XFS_ICI_EOFBLOCKS_TAG);
 }
 
 static void
-__xfs_inode_clear_eofblocks_tag(
+__xfs_inode_clear_blocks_tag(
 	xfs_inode_t	*ip,
 	void		(*clear_tp)(struct xfs_mount *mp, xfs_agnumber_t agno,
 			    int error, unsigned long caller_ip),
@@ -1603,7 +1618,7 @@ __xfs_inode_clear_eofblocks_tag(
 	struct xfs_perag *pag;
 
 	spin_lock(&ip->i_flags_lock);
-	ip->i_flags &= ~XFS_IEOFBLOCKS;
+	ip->i_flags &= ~xfs_iflag_for_tag(tag);
 	spin_unlock(&ip->i_flags_lock);
 
 	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
@@ -1630,7 +1645,7 @@ xfs_inode_clear_eofblocks_tag(
 	xfs_inode_t	*ip)
 {
 	trace_xfs_inode_clear_eofblocks_tag(ip);
-	return __xfs_inode_clear_eofblocks_tag(ip,
+	return __xfs_inode_clear_blocks_tag(ip,
 			trace_xfs_perag_clear_eofblocks, XFS_ICI_EOFBLOCKS_TAG);
 }
 
@@ -1724,7 +1739,7 @@ xfs_inode_set_cowblocks_tag(
 	xfs_inode_t	*ip)
 {
 	trace_xfs_inode_set_cowblocks_tag(ip);
-	return __xfs_inode_set_eofblocks_tag(ip, xfs_queue_cowblocks,
+	return __xfs_inode_set_blocks_tag(ip, xfs_queue_cowblocks,
 			trace_xfs_perag_set_cowblocks,
 			XFS_ICI_COWBLOCKS_TAG);
 }
@@ -1734,6 +1749,6 @@ xfs_inode_clear_cowblocks_tag(
 	xfs_inode_t	*ip)
 {
 	trace_xfs_inode_clear_cowblocks_tag(ip);
-	return __xfs_inode_clear_eofblocks_tag(ip,
+	return __xfs_inode_clear_blocks_tag(ip,
 			trace_xfs_perag_clear_cowblocks, XFS_ICI_COWBLOCKS_TAG);
 }
@@ -81,6 +81,7 @@ void xfs_inode_clear_cowblocks_tag(struct xfs_inode *ip);
 int xfs_icache_free_cowblocks(struct xfs_mount *, struct xfs_eofblocks *);
 int xfs_inode_free_quota_cowblocks(struct xfs_inode *ip);
 void xfs_cowblocks_worker(struct work_struct *);
+void xfs_queue_cowblocks(struct xfs_mount *);
 
 int xfs_inode_ag_iterator(struct xfs_mount *mp,
 	int (*execute)(struct xfs_inode *ip, int flags, void *args),
@@ -1487,6 +1487,24 @@ xfs_link(
 	return error;
 }
 
+/* Clear the reflink flag and the cowblocks tag if possible. */
+static void
+xfs_itruncate_clear_reflink_flags(
+	struct xfs_inode	*ip)
+{
+	struct xfs_ifork	*dfork;
+	struct xfs_ifork	*cfork;
+
+	if (!xfs_is_reflink_inode(ip))
+		return;
+	dfork = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
+	cfork = XFS_IFORK_PTR(ip, XFS_COW_FORK);
+	if (dfork->if_bytes == 0 && cfork->if_bytes == 0)
+		ip->i_d.di_flags2 &= ~XFS_DIFLAG2_REFLINK;
+	if (cfork->if_bytes == 0)
+		xfs_inode_clear_cowblocks_tag(ip);
+}
+
 /*
  * Free up the underlying blocks past new_size. The new size must be smaller
  * than the current size. This routine can be used both for the attribute and
@@ -1583,15 +1601,7 @@ xfs_itruncate_extents(
 	if (error)
 		goto out;
 
-	/*
-	 * Clear the reflink flag if there are no data fork blocks and
-	 * there are no extents staged in the cow fork.
-	 */
-	if (xfs_is_reflink_inode(ip) && ip->i_cnextents == 0) {
-		if (ip->i_d.di_nblocks == 0)
-			ip->i_d.di_flags2 &= ~XFS_DIFLAG2_REFLINK;
-		xfs_inode_clear_cowblocks_tag(ip);
-	}
+	xfs_itruncate_clear_reflink_flags(ip);
 
 	/*
 	 * Always re-log the inode so that our permanent transaction can keep
|
@ -232,6 +232,7 @@ static inline bool xfs_is_reflink_inode(struct xfs_inode *ip)
|
||||
* log recovery to replay a bmap operation on the inode.
|
||||
*/
|
||||
#define XFS_IRECOVERY (1 << 11)
|
||||
#define XFS_ICOWBLOCKS (1 << 12)/* has the cowblocks tag set */
|
||||
|
||||
/*
|
||||
* Per-lifetime flags need to be reset when re-using a reclaimable inode during
|
||||
|
@ -454,6 +454,8 @@ retry:
|
||||
if (error)
|
||||
goto out_bmap_cancel;
|
||||
|
||||
xfs_inode_set_cowblocks_tag(ip);
|
||||
|
||||
/* Finish up. */
|
||||
error = xfs_defer_finish(&tp, &dfops);
|
||||
if (error)
|
||||
@ -490,8 +492,9 @@ xfs_reflink_find_cow_mapping(
|
||||
struct xfs_iext_cursor icur;
|
||||
|
||||
ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL | XFS_ILOCK_SHARED));
|
||||
ASSERT(xfs_is_reflink_inode(ip));
|
||||
|
||||
if (!xfs_is_reflink_inode(ip))
|
||||
return false;
|
||||
offset_fsb = XFS_B_TO_FSBT(ip->i_mount, offset);
|
||||
if (!xfs_iext_lookup_extent(ip, ifp, offset_fsb, &icur, &got))
|
||||
return false;
|
||||
@ -610,6 +613,9 @@ xfs_reflink_cancel_cow_blocks(
|
||||
|
||||
/* Remove the mapping from the CoW fork. */
|
||||
xfs_bmap_del_extent_cow(ip, &icur, &got, &del);
|
||||
} else {
|
||||
/* Didn't do anything, push cursor back. */
|
||||
xfs_iext_prev(ifp, &icur);
|
||||
}
|
||||
next_extent:
|
||||
if (!xfs_iext_get_extent(ifp, &icur, &got))
|
||||
@@ -725,7 +731,7 @@ xfs_reflink_end_cow(
 			(unsigned int)(end_fsb - offset_fsb),
 			XFS_DATA_FORK);
 	error = xfs_trans_alloc(ip->i_mount, &M_RES(ip->i_mount)->tr_write,
-			resblks, 0, 0, &tp);
+			resblks, 0, XFS_TRANS_RESERVE, &tp);
 	if (error)
 		goto out;
 
@@ -1291,6 +1297,17 @@ xfs_reflink_remap_range(
 
 	trace_xfs_reflink_remap_range(src, pos_in, len, dest, pos_out);
 
+	/*
+	 * Clear out post-eof preallocations because we don't have page cache
+	 * backing the delayed allocations and they'll never get freed on
+	 * their own.
+	 */
+	if (xfs_can_free_eofblocks(dest, true)) {
+		ret = xfs_free_eofblocks(dest);
+		if (ret)
+			goto out_unlock;
+	}
+
 	/* Set flags and remap blocks. */
 	ret = xfs_reflink_set_inode_flag(src, dest);
 	if (ret)
|
@ -1360,6 +1360,7 @@ xfs_fs_remount(
|
||||
xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
|
||||
return error;
|
||||
}
|
||||
xfs_queue_cowblocks(mp);
|
||||
|
||||
/* Create the per-AG metadata reservation pool .*/
|
||||
error = xfs_fs_reserve_ag_blocks(mp);
|
||||
@@ -1369,6 +1370,14 @@ xfs_fs_remount(
 
 	/* rw -> ro */
 	if (!(mp->m_flags & XFS_MOUNT_RDONLY) && (*flags & SB_RDONLY)) {
+		/* Get rid of any leftover CoW reservations... */
+		cancel_delayed_work_sync(&mp->m_cowblocks_work);
+		error = xfs_icache_free_cowblocks(mp, NULL);
+		if (error) {
+			xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
+			return error;
+		}
+
 		/* Free the per-AG metadata reservation pool. */
 		error = xfs_fs_unreserve_ag_blocks(mp);
 		if (error) {