commit 82842fee6e
Lock order in XFS is AGI -> AGF, hence for operations involving the
inode unlinked list we always lock the AGI first. Inode unlinked list
operations operate on the inode cluster buffer, so the lock order
there is AGI -> inode cluster buffer.
For O_TMPFILE operations, this now means the lock order set down in
xfs_rename and xfs_link is AGI -> inode cluster buffer -> AGF as the
unlinked ops are done before the directory modifications that may
allocate space and lock the AGF.
Unfortunately, we also now lock the inode cluster buffer when
logging an inode so that we can attach the inode to the cluster
buffer and pin it in memory. This creates a lock order of AGF ->
inode cluster buffer in directory operations as we have to log the
inode after we've allocated new space for it.
This creates a lock inversion between the AGF and the inode cluster
buffer. Because a cluster buffer is shared by multiple inodes, the
inversion is not confined to a single inode: two transactions working
on different inodes that share a cluster buffer can end up taking the
AGF and the cluster buffer in opposite orders.
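
To make the inversion concrete, here is a minimal, self-contained
sketch: plain pthread mutexes stand in for the AGF buffer lock and the
inode cluster buffer lock (the AGI is omitted), and the names and
structure are illustrative only, not the XFS code.

/*
 * Minimal model of the two lock orders described above. This is an
 * illustration of the AB-BA inversion, not XFS code.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t agf_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t cluster_buf_lock = PTHREAD_MUTEX_INITIALIZER;

/* O_TMPFILE link/rename path: unlinked list work first, then directory work. */
static void tmpfile_link_path(void)
{
	pthread_mutex_lock(&cluster_buf_lock);	/* unlinked list op on cluster buffer */
	pthread_mutex_lock(&agf_lock);		/* directory block allocation */
	pthread_mutex_unlock(&agf_lock);
	pthread_mutex_unlock(&cluster_buf_lock);
}

/* Directory op that allocates space and then logs an inode. */
static void dir_alloc_path(void)
{
	pthread_mutex_lock(&agf_lock);		/* allocate new space */
	pthread_mutex_lock(&cluster_buf_lock);	/* log inode: pin its cluster buffer */
	pthread_mutex_unlock(&cluster_buf_lock);
	pthread_mutex_unlock(&agf_lock);
}

int main(void)
{
	/*
	 * Run sequentially so this example terminates; if two threads ran
	 * these paths concurrently against inodes sharing a cluster buffer,
	 * the opposite acquisition orders could deadlock.
	 */
	tmpfile_link_path();
	dir_alloc_path();
	puts("both paths completed (no concurrency in this sketch)");
	return 0;
}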
To fix this we need to move all the inode log item cluster buffer
interactions to the end of the current transaction. Unfortunately,
xfs_trans_log_inode() calls are littered throughout the transactions
with no thought to ordering against other items or locking. This
makes it difficult to do anything that involves changing the call
sites of xfs_trans_log_inode() to change locking orders.
However, we do now have a mechanism that allows us to postpone dirty
item processing to just before we commit the transaction: the
->iop_precommit method. This will be called after all the
modifications are done and high level objects like AGI and AGF
buffers have been locked and modified, thereby providing a mechanism
that guarantees we don't lock the inode cluster buffer before those
high level objects are locked.
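
A rough sketch of that ordering guarantee follows; the callback name
echoes ->iop_precommit from the description above, while the structures
and helper names are invented for illustration and are not the XFS log
item API.

/*
 * Generic precommit-hook pattern. Items joined to a transaction only
 * record that they are dirty while the transaction runs; the per-item
 * precommit callback fires once, just before commit, after the
 * high-level objects (AGI/AGF in the XFS case) have already been
 * locked and modified.
 */
#include <stdbool.h>
#include <stddef.h>

struct log_item {
	bool dirty;
	void (*iop_precommit)(struct log_item *lip);
	struct log_item *next;
};

struct transaction {
	struct log_item *items;		/* items joined to this transaction */
};

/* Callable from arbitrary points in the transaction: cheap, takes no locks. */
void item_mark_dirty(struct log_item *lip)
{
	lip->dirty = true;
}

/* Runs exactly once, at commit time, after the high-level locking is done. */
void transaction_commit(struct transaction *tp)
{
	for (struct log_item *lip = tp->items; lip != NULL; lip = lip->next) {
		if (lip->dirty && lip->iop_precommit)
			lip->iop_precommit(lip);	/* may lock lower-order objects */
	}
	/* ... format and write the transaction to the log ... */
}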
This change largely moves the guts of xfs_trans_log_inode() to
xfs_inode_item_precommit() and adds an extra flag field
(ili_dirty_flags) to the inode log item to track the dirty state of
the inode in the current transaction. It also means we do far less
repeated work in xfs_trans_log_inode(), as the expensive processing is
now done only once per transaction, after all the work is done.
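
The "once per transaction" point can be sketched as below; the two
field names mirror those in the header further down, while the
functions are simplified stand-ins rather than the real
xfs_trans_log_inode()/xfs_inode_item_precommit().

/*
 * Illustrative only: logging an inode during the transaction now just
 * accumulates flags in ili_dirty_flags; the precommit step folds them
 * into ili_fields (and is where the cluster buffer would be locked and
 * the inode attached) a single time.
 */
struct ili_sketch {
	unsigned int ili_dirty_flags;	/* dirty in current transaction */
	unsigned int ili_fields;	/* fields to be logged */
};

/* Cheap per-callsite operation: no cluster buffer locking here any more. */
static inline void sketch_log_inode(struct ili_sketch *ili, unsigned int flags)
{
	ili->ili_dirty_flags |= flags;
}

/* Runs once from ->iop_precommit, after the AGI/AGF have been locked. */
static inline void sketch_inode_item_precommit(struct ili_sketch *ili)
{
	/* lock the inode cluster buffer, attach the inode, pin it ... */
	ili->ili_fields |= ili->ili_dirty_flags;
	ili->ili_dirty_flags = 0;
}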
Fixes: 298f7bec50 ("xfs: pin inode backing buffer to the inode log item")
Signed-off-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Darrick J. Wong <djwong@kernel.org>
Signed-off-by: Dave Chinner <david@fromorbit.com>
fs/xfs/xfs_inode_item.h (55 lines, 1.9 KiB, C)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#ifndef __XFS_INODE_ITEM_H__
#define __XFS_INODE_ITEM_H__

/* kernel only definitions */

struct xfs_buf;
struct xfs_bmbt_rec;
struct xfs_inode;
struct xfs_mount;

struct xfs_inode_log_item {
	struct xfs_log_item	ili_item;	/* common portion */
	struct xfs_inode	*ili_inode;	/* inode ptr */
	unsigned short		ili_lock_flags;	/* inode lock flags */
	unsigned int		ili_dirty_flags; /* dirty in current tx */
	/*
	 * The ili_lock protects the interactions between the dirty state and
	 * the flush state of the inode log item. This allows us to do atomic
	 * modifications of multiple state fields without having to hold a
	 * specific inode lock to serialise them.
	 *
	 * We need atomic changes between inode dirtying, inode flushing and
	 * inode completion, but these all hold different combinations of
	 * ILOCK and IFLUSHING and hence we need some other method of
	 * serialising updates to the flush state.
	 */
	spinlock_t		ili_lock;	/* flush state lock */
	unsigned int		ili_last_fields; /* fields when flushed */
	unsigned int		ili_fields;	/* fields to be logged */
	unsigned int		ili_fsync_fields; /* logged since last fsync */
	xfs_lsn_t		ili_flush_lsn;	/* lsn at last flush */
	xfs_csn_t		ili_commit_seq;	/* last transaction commit */
};

static inline int xfs_inode_clean(struct xfs_inode *ip)
{
	return !ip->i_itemp || !(ip->i_itemp->ili_fields & XFS_ILOG_ALL);
}

extern void xfs_inode_item_init(struct xfs_inode *, struct xfs_mount *);
extern void xfs_inode_item_destroy(struct xfs_inode *);
extern void xfs_iflush_abort(struct xfs_inode *);
extern void xfs_iflush_shutdown_abort(struct xfs_inode *);
extern int xfs_inode_item_format_convert(xfs_log_iovec_t *,
					 struct xfs_inode_log_format *);

extern struct kmem_cache *xfs_ili_cache;

#endif	/* __XFS_INODE_ITEM_H__ */