don't try to cut corners in shrink_lock_dentry()

That is to say, do *not* treat the ->d_inode or ->d_parent changes
as "it's hard, return false; somebody must have grabbed it, so
even if it has zero refcount, we don't need to bother killing it -
the final dput() from whoever grabbed it would've done everything".

First of all, that is not guaranteed.  It might have been dropped
by shrink_kill() handling of the victim's parent, which would've
found it already on a shrink list (ours) and decided that it didn't
need to be put on their shrink list.

What's more, dentry_kill() is doing pretty much the same thing,
cutting its own set of corners (it assumes that a dentry can't
go from positive to negative, so its inode can change, but only
once and only in one direction).

Doing that right allows us to get rid of that not-quite-duplication
and removes the only reason for re-incrementing the refcount before
the call of dentry_kill().

The replacement is called lock_for_kill(); it is called under
rcu_read_lock and with ->d_lock held.  If it returns false, the
dentry has a non-zero refcount and the same locks are held.  If it
returns true, the dentry has zero refcount and all locks required
by __dentry_kill() are taken.
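
Seen from the caller's side, that contract amounts to roughly the
following pattern (a sketch modeled on the dentry_kill() side of
this patch, not a verbatim excerpt):

        spin_lock(&dentry->d_lock);
        rcu_read_lock();
        if (lock_for_kill(dentry)) {
                /* zero refcount; inode and parent locks are held */
                struct dentry *parent = dentry->d_parent;
                rcu_read_unlock();
                __dentry_kill(dentry);
        } else {
                /* non-zero refcount; rcu and ->d_lock still held */
                rcu_read_unlock();
                spin_unlock(&dentry->d_lock);
        }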

Part of __lock_parent() has been lifted into lock_parent() to
allow its reuse.  __lock_parent() is now called with rcu_read_lock
already held and with the dentry already unlocked.
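
The resulting division of labor, roughly (a sketch of the calling
convention, not the exact code):

        /* fast path stays in lock_parent(): ->d_lock held, no rcu */
        if (likely(spin_trylock(&parent->d_lock)))
                return parent;
        /*
         * slow path goes through __lock_parent(): the caller holds
         * rcu_read_lock() and has dropped dentry->d_lock; on return,
         * dentry->d_lock is held again, along with the parent's
         * ->d_lock (NULL is returned for a root dentry).
         */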

Note that this is not the final change - locking requirements for
__dentry_kill() are going to change later in the series and the
set of locks taken by lock_for_kill() will be adjusted.  Both
lock_parent() and __lock_parent() will be gone once that happens.

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>

Author: Al Viro <viro@zeniv.linux.org.uk>
Date:   2023-10-31 01:23:47 -04:00
Parent: f05441c7e1
Commit: 339e9e1353

--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -625,8 +625,6 @@ static void __dentry_kill(struct dentry *dentry)
 static struct dentry *__lock_parent(struct dentry *dentry)
 {
         struct dentry *parent;
-        rcu_read_lock();
-        spin_unlock(&dentry->d_lock);
 again:
         parent = READ_ONCE(dentry->d_parent);
         spin_lock(&parent->d_lock);
@@ -642,7 +640,6 @@ again:
                 spin_unlock(&parent->d_lock);
                 goto again;
         }
-        rcu_read_unlock();
         if (parent != dentry)
                 spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
         else
@@ -657,7 +654,64 @@ static inline struct dentry *lock_parent(struct dentry *dentry)
                 return NULL;
         if (likely(spin_trylock(&parent->d_lock)))
                 return parent;
-        return __lock_parent(dentry);
+        rcu_read_lock();
+        spin_unlock(&dentry->d_lock);
+        parent = __lock_parent(dentry);
+        rcu_read_unlock();
+        return parent;
 }
 
+/*
+ * Lock a dentry for feeding it to __dentry_kill().
+ * Called under rcu_read_lock() and dentry->d_lock; the former
+ * guarantees that nothing we access will be freed under us.
+ * Note that dentry is *not* protected from concurrent dentry_kill(),
+ * d_delete(), etc.
+ *
+ * Return false if dentry is busy.  Otherwise, return true and have
+ * that dentry's inode and parent both locked.
+ */
+static bool lock_for_kill(struct dentry *dentry)
+{
+        struct inode *inode = dentry->d_inode;
+        struct dentry *parent = dentry->d_parent;
+
+        if (unlikely(dentry->d_lockref.count))
+                return false;
+
+        if (inode && unlikely(!spin_trylock(&inode->i_lock)))
+                goto slow;
+        if (dentry == parent)
+                return true;
+        if (likely(spin_trylock(&parent->d_lock)))
+                return true;
+
+        if (inode)
+                spin_unlock(&inode->i_lock);
+slow:
+        spin_unlock(&dentry->d_lock);
+
+        for (;;) {
+                if (inode)
+                        spin_lock(&inode->i_lock);
+                parent = __lock_parent(dentry);
+                if (likely(inode == dentry->d_inode))
+                        break;
+                if (inode)
+                        spin_unlock(&inode->i_lock);
+                inode = dentry->d_inode;
+                spin_unlock(&dentry->d_lock);
+                if (parent)
+                        spin_unlock(&parent->d_lock);
+        }
+        if (likely(!dentry->d_lockref.count))
+                return true;
+
+        if (inode)
+                spin_unlock(&inode->i_lock);
+        if (parent)
+                spin_unlock(&parent->d_lock);
+        return false;
+}
+
 static inline bool retain_dentry(struct dentry *dentry)
@@ -710,45 +764,16 @@ EXPORT_SYMBOL(d_mark_dontcache);
 static struct dentry *dentry_kill(struct dentry *dentry)
         __releases(dentry->d_lock)
 {
-        struct inode *inode = dentry->d_inode;
-        struct dentry *parent = NULL;
-
-        if (inode && unlikely(!spin_trylock(&inode->i_lock)))
-                goto slow_positive;
-
-        if (!IS_ROOT(dentry)) {
-                parent = dentry->d_parent;
-                if (unlikely(!spin_trylock(&parent->d_lock))) {
-                        parent = __lock_parent(dentry);
-                        if (likely(inode || !dentry->d_inode))
-                                goto got_locks;
-                        /* negative that became positive */
-                        if (parent)
-                                spin_unlock(&parent->d_lock);
-                        inode = dentry->d_inode;
-                        goto slow_positive;
-                }
-        }
         dentry->d_lockref.count--;
-        __dentry_kill(dentry);
-        return parent;
-
-slow_positive:
-        spin_unlock(&dentry->d_lock);
-        spin_lock(&inode->i_lock);
-        spin_lock(&dentry->d_lock);
-        parent = lock_parent(dentry);
-got_locks:
-        dentry->d_lockref.count--;
-        if (likely(dentry->d_lockref.count == 0)) {
+        rcu_read_lock();
+        if (likely(lock_for_kill(dentry))) {
+                struct dentry *parent = dentry->d_parent;
+                rcu_read_unlock();
                 __dentry_kill(dentry);
-                return parent;
+                return parent != dentry ? parent : NULL;
         }
         /* we are keeping it, after all */
-        if (inode)
-                spin_unlock(&inode->i_lock);
-        if (parent)
-                spin_unlock(&parent->d_lock);
+        rcu_read_unlock();
         spin_unlock(&dentry->d_lock);
         return NULL;
 }
@@ -1100,58 +1125,6 @@ restart:
 }
 EXPORT_SYMBOL(d_prune_aliases);
 
-/*
- * Lock a dentry from shrink list.
- * Called under rcu_read_lock() and dentry->d_lock; the former
- * guarantees that nothing we access will be freed under us.
- * Note that dentry is *not* protected from concurrent dentry_kill(),
- * d_delete(), etc.
- *
- * Return false if dentry has been disrupted or grabbed, leaving
- * the caller to kick it off-list.  Otherwise, return true and have
- * that dentry's inode and parent both locked.
- */
-static bool shrink_lock_dentry(struct dentry *dentry)
-{
-        struct inode *inode;
-        struct dentry *parent;
-
-        if (dentry->d_lockref.count)
-                return false;
-
-        inode = dentry->d_inode;
-        if (inode && unlikely(!spin_trylock(&inode->i_lock))) {
-                spin_unlock(&dentry->d_lock);
-                spin_lock(&inode->i_lock);
-                spin_lock(&dentry->d_lock);
-                if (unlikely(dentry->d_lockref.count))
-                        goto out;
-                /* changed inode means that somebody had grabbed it */
-                if (unlikely(inode != dentry->d_inode))
-                        goto out;
-        }
-
-        parent = dentry->d_parent;
-        if (IS_ROOT(dentry) || likely(spin_trylock(&parent->d_lock)))
-                return true;
-
-        spin_unlock(&dentry->d_lock);
-        spin_lock(&parent->d_lock);
-        if (unlikely(parent != dentry->d_parent)) {
-                spin_unlock(&parent->d_lock);
-                spin_lock(&dentry->d_lock);
-                goto out;
-        }
-        spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
-        if (likely(!dentry->d_lockref.count))
-                return true;
-        spin_unlock(&parent->d_lock);
-out:
-        if (inode)
-                spin_unlock(&inode->i_lock);
-        return false;
-}
-
 static inline void shrink_kill(struct dentry *victim, struct list_head *list)
 {
         struct dentry *parent = victim->d_parent;
@@ -1170,7 +1143,7 @@ void shrink_dentry_list(struct list_head *list)
                 dentry = list_entry(list->prev, struct dentry, d_lru);
                 spin_lock(&dentry->d_lock);
                 rcu_read_lock();
-                if (!shrink_lock_dentry(dentry)) {
+                if (!lock_for_kill(dentry)) {
                         bool can_free;
                         rcu_read_unlock();
                         d_shrink_del(dentry);
@@ -1614,7 +1587,7 @@ void shrink_dcache_parent(struct dentry *parent)
                 d_walk(parent, &data, select_collect2);
                 if (data.victim) {
                         spin_lock(&data.victim->d_lock);
-                        if (!shrink_lock_dentry(data.victim)) {
+                        if (!lock_for_kill(data.victim)) {
                                 spin_unlock(&data.victim->d_lock);
                                 rcu_read_unlock();
                         } else {