
Merge branch 'work.dcache' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs

Pull dcache updates from Al Viro:
 "This is the first part of dealing with livelocks etc around
  shrink_dcache_parent()."

* 'work.dcache' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs:
  restore cond_resched() in shrink_dcache_parent()
  dput(): turn into explicit while() loop
  dcache: move cond_resched() into the end of __dentry_kill()
  d_walk(): kill 'finish' callback
  d_invalidate(): unhash immediately
Author: Linus Torvalds
Date:   2018-06-04 08:57:36 -07:00
Commit: 06c86e66d6

fs/dcache.c

@@ -580,6 +580,7 @@ static void __dentry_kill(struct dentry *dentry)
 	spin_unlock(&dentry->d_lock);
 	if (likely(can_free))
 		dentry_free(dentry);
+	cond_resched();
 }
 
 static struct dentry *__lock_parent(struct dentry *dentry)
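
Putting the reschedule point at the end of __dentry_kill() means every path that tears dentries down in a loop (the dput() chain below and shrink_dentry_list()) yields once per kill, instead of each caller carrying its own cond_resched(). As a rough userspace analogue (the struct item type and helpers are illustrative stand-ins, with sched_yield() playing the role of cond_resched(); this is not kernel code):

#include <sched.h>
#include <stdlib.h>

struct item { struct item *next; };

/* Teardown helper: yields once per destroyed item, the way the patch
 * puts cond_resched() at the end of __dentry_kill(). */
static void destroy_item(struct item *it)
{
	free(it);
	sched_yield();
}

/* Loops that destroy long chains no longer need a per-iteration yield
 * of their own, mirroring the removals in dput() and shrink_dentry_list(). */
static void destroy_list(struct item *head)
{
	while (head) {
		struct item *next = head->next;
		destroy_item(head);
		head = next;
	}
}
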
@@ -827,30 +828,24 @@ static inline bool fast_dput(struct dentry *dentry)
  */
 void dput(struct dentry *dentry)
 {
-	if (unlikely(!dentry))
-		return;
+	while (dentry) {
+		might_sleep();
 
-repeat:
-	might_sleep();
+		rcu_read_lock();
+		if (likely(fast_dput(dentry))) {
+			rcu_read_unlock();
+			return;
+		}
 
-	rcu_read_lock();
-	if (likely(fast_dput(dentry))) {
-		rcu_read_unlock();
-		return;
-	}
+		/* Slow case: now with the dentry lock held */
+		rcu_read_unlock();
 
-	/* Slow case: now with the dentry lock held */
-	rcu_read_unlock();
+		if (likely(retain_dentry(dentry))) {
+			spin_unlock(&dentry->d_lock);
+			return;
+		}
 
-	if (likely(retain_dentry(dentry))) {
-		spin_unlock(&dentry->d_lock);
-		return;
-	}
-
-	dentry = dentry_kill(dentry);
-	if (dentry) {
-		cond_resched();
-		goto repeat;
+		dentry = dentry_kill(dentry);
 	}
 }
 EXPORT_SYMBOL(dput);
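
The old dput() used a goto repeat back-edge to express "killing this dentry dropped the last reference on its parent, so release that one next"; the new form makes that chain an explicit while (dentry) loop. A minimal userspace sketch of the same refactor, assuming a toy node type and a release_one() helper (illustrative only, not the kernel API):

#include <stddef.h>

struct node {
	int refcount;
	struct node *parent;
};

/* Drop one reference; if the node dies, hand back its parent so the
 * caller can drop the reference the dead node held on it (the role
 * dentry_kill() plays for dput()). */
static struct node *release_one(struct node *n)
{
	struct node *parent;

	if (--n->refcount > 0)
		return NULL;		/* still in use: the chain ends here */
	parent = n->parent;
	/* real code would free n's resources here */
	return parent;			/* caller must now drop this reference */
}

/* Old shape: "goto repeat" guarded by a trailing if; new shape: the chain
 * of parents to release is an explicit loop, as in the rewritten dput(). */
void put_node(struct node *n)
{
	while (n)
		n = release_one(n);
}
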
@@ -1052,8 +1047,6 @@ static void shrink_dentry_list(struct list_head *list)
 	while (!list_empty(list)) {
 		struct dentry *dentry, *parent;
 
-		cond_resched();
-
 		dentry = list_entry(list->prev, struct dentry, d_lru);
 		spin_lock(&dentry->d_lock);
 		rcu_read_lock();
@@ -1230,13 +1223,11 @@ enum d_walk_ret {
  * @parent:	start of walk
  * @data:	data passed to @enter() and @finish()
  * @enter:	callback when first entering the dentry
- * @finish:	callback when successfully finished the walk
  *
- * The @enter() and @finish() callbacks are called with d_lock held.
+ * The @enter() callbacks are called with d_lock held.
  */
 static void d_walk(struct dentry *parent, void *data,
-		   enum d_walk_ret (*enter)(void *, struct dentry *),
-		   void (*finish)(void *))
+		   enum d_walk_ret (*enter)(void *, struct dentry *))
 {
 	struct dentry *this_parent;
 	struct list_head *next;
@@ -1325,8 +1316,6 @@ ascend:
 	if (need_seqretry(&rename_lock, seq))
 		goto rename_retry;
 	rcu_read_unlock();
-	if (finish)
-		finish(data);
 
 out_unlock:
 	spin_unlock(&this_parent->d_lock);
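
With the finish() callback gone, a d_walk() caller does its post-walk work itself once the walk returns; the walker needs only the per-dentry enter() callback, which steers it through the D_WALK_* return codes. A compact userspace sketch of that enter-only pattern, assuming a toy tnode tree and walk_tree() helper (not the kernel API):

#include <string.h>

enum walk_ret { WALK_CONTINUE, WALK_QUIT };

struct tnode {
	const char *name;
	struct tnode *child, *sibling;
};

/* Depth-first walk driven only by an "enter" callback; a WALK_QUIT
 * return aborts the whole walk, like D_WALK_QUIT does for d_walk(). */
static enum walk_ret walk_tree(struct tnode *n, void *data,
			       enum walk_ret (*enter)(void *, struct tnode *))
{
	if (enter(data, n) == WALK_QUIT)
		return WALK_QUIT;
	for (struct tnode *c = n->child; c; c = c->sibling)
		if (walk_tree(c, data, enter) == WALK_QUIT)
			return WALK_QUIT;
	return WALK_CONTINUE;
}

/* Example callback: record the first node with a given name and stop. */
static enum walk_ret find_named(void *data, struct tnode *n)
{
	struct tnode **out = data;

	if (strcmp(n->name, "target") == 0) {
		*out = n;
		return WALK_QUIT;
	}
	return WALK_CONTINUE;
}

Anything the removed finish() hook used to do (the old check_and_drop(), for instance) now happens in the caller after the walk returns, as the later d_invalidate() hunk shows.
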
@@ -1375,7 +1364,7 @@ int path_has_submounts(const struct path *parent)
 	struct check_mount data = { .mnt = parent->mnt, .mounted = 0 };
 
 	read_seqlock_excl(&mount_lock);
-	d_walk(parent->dentry, &data, path_check_mount, NULL);
+	d_walk(parent->dentry, &data, path_check_mount);
 	read_sequnlock_excl(&mount_lock);
 
 	return data.mounted;
@@ -1483,11 +1472,16 @@ void shrink_dcache_parent(struct dentry *parent)
 		data.start = parent;
 		data.found = 0;
 
-		d_walk(parent, &data, select_collect, NULL);
+		d_walk(parent, &data, select_collect);
+
+		if (!list_empty(&data.dispose)) {
+			shrink_dentry_list(&data.dispose);
+			continue;
+		}
+
+		cond_resched();
 		if (!data.found)
 			break;
-
-		shrink_dentry_list(&data.dispose);
 	}
 }
 EXPORT_SYMBOL(shrink_dcache_parent);
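
The reworked shrink_dcache_parent() loop disposes of whatever a pass collected and immediately rescans; only a pass that frees nothing reschedules, and the loop stops once a pass also finds nothing still in use. A rough userspace analogue of that collect/dispose/retry shape (collect_prunable(), dispose_list() and the victim list are stand-ins invented for the sketch, with sched_yield() in place of cond_resched()):

#include <sched.h>
#include <stdbool.h>
#include <stddef.h>

struct victim { struct victim *next; };

/* Stub scan: the real code is d_walk() + select_collect().  It returns a
 * list of entries that can be freed right now and reports via *found
 * whether any candidates (freeable or still busy) were seen at all. */
static struct victim *collect_prunable(bool *found)
{
	*found = false;
	return NULL;
}

static void dispose_list(struct victim *list)
{
	(void)list;		/* the real code frees each entry here */
}

void prune_all(void)
{
	for (;;) {
		bool found;
		struct victim *list = collect_prunable(&found);

		if (list) {		/* made progress: dispose and rescan */
			dispose_list(list);
			continue;
		}

		sched_yield();		/* nothing freed on this pass */
		if (!found)		/* and nothing left to wait for */
			break;
	}
}
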
@@ -1518,7 +1512,7 @@ static enum d_walk_ret umount_check(void *_data, struct dentry *dentry)
 static void do_one_tree(struct dentry *dentry)
 {
 	shrink_dcache_parent(dentry);
-	d_walk(dentry, dentry, umount_check, NULL);
+	d_walk(dentry, dentry, umount_check);
 	d_drop(dentry);
 	dput(dentry);
 }
@@ -1542,78 +1536,48 @@ void shrink_dcache_for_umount(struct super_block *sb)
 	}
 }
 
-struct detach_data {
-	struct select_data select;
-	struct dentry *mountpoint;
-};
-static enum d_walk_ret detach_and_collect(void *_data, struct dentry *dentry)
+static enum d_walk_ret find_submount(void *_data, struct dentry *dentry)
 {
-	struct detach_data *data = _data;
-
+	struct dentry **victim = _data;
 	if (d_mountpoint(dentry)) {
 		__dget_dlock(dentry);
-		data->mountpoint = dentry;
+		*victim = dentry;
 		return D_WALK_QUIT;
 	}
-
-	return select_collect(&data->select, dentry);
-}
-
-static void check_and_drop(void *_data)
-{
-	struct detach_data *data = _data;
-
-	if (!data->mountpoint && list_empty(&data->select.dispose))
-		__d_drop(data->select.start);
+	return D_WALK_CONTINUE;
 }
 
 /**
  * d_invalidate - detach submounts, prune dcache, and drop
  * @dentry: dentry to invalidate (aka detach, prune and drop)
  *
- * no dcache lock.
- *
  * The final d_drop is done as an atomic operation relative to
  * rename_lock ensuring there are no races with d_set_mounted.  This
  * ensures there are no unhashed dentries on the path to a mountpoint.
  */
 void d_invalidate(struct dentry *dentry)
 {
-	/*
-	 * If it's already been dropped, return OK.
-	 */
+	bool had_submounts = false;
 	spin_lock(&dentry->d_lock);
 	if (d_unhashed(dentry)) {
 		spin_unlock(&dentry->d_lock);
 		return;
 	}
+	__d_drop(dentry);
 	spin_unlock(&dentry->d_lock);
 
 	/* Negative dentries can be dropped without further checks */
-	if (!dentry->d_inode) {
-		d_drop(dentry);
+	if (!dentry->d_inode)
 		return;
-	}
 
+	shrink_dcache_parent(dentry);
 	for (;;) {
-		struct detach_data data;
-
-		data.mountpoint = NULL;
-		INIT_LIST_HEAD(&data.select.dispose);
-		data.select.start = dentry;
-		data.select.found = 0;
-
-		d_walk(dentry, &data, detach_and_collect, check_and_drop);
-
-		if (!list_empty(&data.select.dispose))
-			shrink_dentry_list(&data.select.dispose);
-		else if (!data.mountpoint)
+		struct dentry *victim = NULL;
+		d_walk(dentry, &victim, find_submount);
+		if (!victim) {
+			if (had_submounts)
+				shrink_dcache_parent(dentry);
 			return;
-
-		if (data.mountpoint) {
-			detach_mounts(data.mountpoint);
-			dput(data.mountpoint);
 		}
+		had_submounts = true;
+		detach_mounts(victim);
+		dput(victim);
 	}
 }
 EXPORT_SYMBOL(d_invalidate);
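
After the change, d_invalidate() unhashes the dentry up front, prunes its subtree once, and then detaches submounts one at a time, pruning again at the end only if a submount was actually found; the old detach_data/check_and_drop machinery is no longer needed because the unhash has already happened. A simplified userspace rendering of that control flow, with stub helpers standing in for the kernel ones (assumptions for the sketch, not the real functions):

#include <stdbool.h>
#include <stddef.h>

struct entry;	/* opaque stand-in for struct dentry */

/* Stubs for the helpers the real d_invalidate() relies on. */
static bool unhash_if_hashed(struct entry *e) { (void)e; return true; }	/* __d_drop() under d_lock */
static bool has_inode(struct entry *e)        { (void)e; return true; }
static void prune_children(struct entry *e)   { (void)e; }		/* shrink_dcache_parent() */
static struct entry *find_one_submount(struct entry *e) { (void)e; return NULL; }
static void detach_and_put(struct entry *v)   { (void)v; }		/* detach_mounts() + dput() */

void invalidate(struct entry *e)
{
	bool had_submounts = false;

	if (!unhash_if_hashed(e))	/* already unhashed: nothing to do */
		return;
	if (!has_inode(e))		/* negative entries need no pruning */
		return;

	prune_children(e);
	for (;;) {
		struct entry *victim = find_one_submount(e);

		if (!victim) {
			/* detaching mounts may have made more entries
			 * prunable, so prune once more if any were seen */
			if (had_submounts)
				prune_children(e);
			return;
		}
		had_submounts = true;
		detach_and_put(victim);
	}
}
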
@@ -3134,7 +3098,7 @@ static enum d_walk_ret d_genocide_kill(void *data, struct dentry *dentry)
 
 void d_genocide(struct dentry *parent)
 {
-	d_walk(parent, parent, d_genocide_kill, NULL);
+	d_walk(parent, parent, d_genocide_kill);
 }
 
 EXPORT_SYMBOL(d_genocide);