mirror of
https://mirrors.bfsu.edu.cn/git/linux.git
synced 2024-11-11 04:18:39 +08:00
vfs: elide smp_mb in iversion handling in the common case
According to bpftrace on these routines most calls result in cmpxchg, which already provides the same guarantee. In inode_maybe_inc_iversion elision is possible because even if the wrong value was read due to now missing smp_mb fence, the issue is going to correct itself after cmpxchg. If it appears cmpxchg won't be issued, the fence + reload are there bringing back previous behavior. Signed-off-by: Mateusz Guzik <mjguzik@gmail.com> Link: https://lore.kernel.org/r/20240815083310.3865-1-mjguzik@gmail.com Reviewed-by: Jeff Layton <jlayton@kernel.org> Signed-off-by: Christian Brauner <brauner@kernel.org>
This commit is contained in:
parent
433f9d76a0
commit
b381fbbccb
28
fs/libfs.c
28
fs/libfs.c
@@ -2003,13 +2003,19 @@ bool inode_maybe_inc_iversion(struct inode *inode, bool force)
|
||||
* information, but the legacy inode_inc_iversion code used a spinlock
|
||||
* to serialize increments.
|
||||
*
|
||||
* Here, we add full memory barriers to ensure that any de-facto
|
||||
* ordering with other info is preserved.
|
||||
* We add a full memory barrier to ensure that any de facto ordering
|
||||
* with other state is preserved (either implicitly coming from cmpxchg
|
||||
* or explicitly from smp_mb if we don't know upfront if we will execute
|
||||
* the former).
|
||||
*
|
||||
* This barrier pairs with the barrier in inode_query_iversion()
|
||||
* These barriers pair with inode_query_iversion().
|
||||
*/
|
||||
smp_mb();
|
||||
cur = inode_peek_iversion_raw(inode);
|
||||
if (!force && !(cur & I_VERSION_QUERIED)) {
|
||||
smp_mb();
|
||||
cur = inode_peek_iversion_raw(inode);
|
||||
}
|
||||
|
||||
do {
|
||||
/* If flag is clear then we needn't do anything */
|
||||
if (!force && !(cur & I_VERSION_QUERIED))
|
||||
@@ -2038,20 +2044,22 @@ EXPORT_SYMBOL(inode_maybe_inc_iversion);
|
||||
u64 inode_query_iversion(struct inode *inode)
|
||||
{
|
||||
u64 cur, new;
|
||||
bool fenced = false;
|
||||
|
||||
/*
|
||||
* Memory barriers (implicit in cmpxchg, explicit in smp_mb) pair with
|
||||
* inode_maybe_inc_iversion(), see that routine for more details.
|
||||
*/
|
||||
cur = inode_peek_iversion_raw(inode);
|
||||
do {
|
||||
/* If flag is already set, then no need to swap */
|
||||
if (cur & I_VERSION_QUERIED) {
|
||||
/*
|
||||
* This barrier (and the implicit barrier in the
|
||||
* cmpxchg below) pairs with the barrier in
|
||||
* inode_maybe_inc_iversion().
|
||||
*/
|
||||
smp_mb();
|
||||
if (!fenced)
|
||||
smp_mb();
|
||||
break;
|
||||
}
|
||||
|
||||
fenced = true;
|
||||
new = cur | I_VERSION_QUERIED;
|
||||
} while (!atomic64_try_cmpxchg(&inode->i_version, &cur, new));
|
||||
return cur >> I_VERSION_QUERIED_SHIFT;
|
||||
|
Loading…
Reference in New Issue
Block a user