On Sun, Apr 15, 2018 at 03:39:44AM +0100, Al Viro wrote:

> I really wonder if we should just do the following in
> d_invalidate():
>       * grab ->d_lock on victim, check if it's unhashed,
> unlock and bugger off if it is.  Otherwise, unhash and unlock.
> From that point on any d_set_mounted() in the subtree will
> fail.
>       * shrink_dcache_parent() to reduce the subtree size.
>       * go through the (hopefully shrunk) subtree, picking
> mountpoints.  detach_mounts() for each of them.
>       * shrink_dcache_parent() if any points had been
> encountered, to kick the now-unpinned stuff.
> 
> As a side benefit, we could probably be gentler on rename_lock
> in d_set_mounted() after that change...

Something like this, IOW:

diff --git a/fs/dcache.c b/fs/dcache.c
index 86d2de63461e..e61c07ff2d95 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -1230,13 +1230,11 @@ enum d_walk_ret {
  * @parent:    start of walk
  * @data:      data passed to @enter() and @finish()
  * @enter:     callback when first entering the dentry
- * @finish:    callback when successfully finished the walk
  *
  * The @enter() and @finish() callbacks are called with d_lock held.
  */
 static void d_walk(struct dentry *parent, void *data,
-                  enum d_walk_ret (*enter)(void *, struct dentry *),
-                  void (*finish)(void *))
+                  enum d_walk_ret (*enter)(void *, struct dentry *))
 {
        struct dentry *this_parent;
        struct list_head *next;
@@ -1325,8 +1323,6 @@ static void d_walk(struct dentry *parent, void *data,
        if (need_seqretry(&rename_lock, seq))
                goto rename_retry;
        rcu_read_unlock();
-       if (finish)
-               finish(data);
 
 out_unlock:
        spin_unlock(&this_parent->d_lock);
@@ -1375,7 +1371,7 @@ int path_has_submounts(const struct path *parent)
        struct check_mount data = { .mnt = parent->mnt, .mounted = 0 };
 
        read_seqlock_excl(&mount_lock);
-       d_walk(parent->dentry, &data, path_check_mount, NULL);
+       d_walk(parent->dentry, &data, path_check_mount);
        read_sequnlock_excl(&mount_lock);
 
        return data.mounted;
@@ -1483,7 +1479,7 @@ void shrink_dcache_parent(struct dentry *parent)
                data.start = parent;
                data.found = 0;
 
-               d_walk(parent, &data, select_collect, NULL);
+               d_walk(parent, &data, select_collect);
                if (!data.found)
                        break;
 
@@ -1518,7 +1514,7 @@ static enum d_walk_ret umount_check(void *_data, struct dentry *dentry)
 static void do_one_tree(struct dentry *dentry)
 {
        shrink_dcache_parent(dentry);
-       d_walk(dentry, dentry, umount_check, NULL);
+       d_walk(dentry, dentry, umount_check);
        d_drop(dentry);
        dput(dentry);
 }
@@ -1542,78 +1538,48 @@ void shrink_dcache_for_umount(struct super_block *sb)
        }
 }
 
-struct detach_data {
-       struct select_data select;
-       struct dentry *mountpoint;
-};
-static enum d_walk_ret detach_and_collect(void *_data, struct dentry *dentry)
+static enum d_walk_ret find_submount(void *_data, struct dentry *dentry)
 {
-       struct detach_data *data = _data;
-
        if (d_mountpoint(dentry)) {
                __dget_dlock(dentry);
-               data->mountpoint = dentry;
+               *(struct dentry **)_data = dentry;
                return D_WALK_QUIT;
        }
 
-       return select_collect(&data->select, dentry);
-}
-
-static void check_and_drop(void *_data)
-{
-       struct detach_data *data = _data;
-
-       if (!data->mountpoint && list_empty(&data->select.dispose))
-               __d_drop(data->select.start);
+       return D_WALK_CONTINUE;
 }
 
 /**
  * d_invalidate - detach submounts, prune dcache, and drop
  * @dentry: dentry to invalidate (aka detach, prune and drop)
- *
- * no dcache lock.
- *
- * The final d_drop is done as an atomic operation relative to
- * rename_lock ensuring there are no races with d_set_mounted.  This
- * ensures there are no unhashed dentries on the path to a mountpoint.
  */
 void d_invalidate(struct dentry *dentry)
 {
-       /*
-        * If it's already been dropped, return OK.
-        */
+       bool had_submounts = false;
        spin_lock(&dentry->d_lock);
        if (d_unhashed(dentry)) {
                spin_unlock(&dentry->d_lock);
                return;
        }
+       __d_drop(dentry);
        spin_unlock(&dentry->d_lock);
 
        /* Negative dentries can be dropped without further checks */
-       if (!dentry->d_inode) {
-               d_drop(dentry);
+       if (!dentry->d_inode)
                return;
-       }
 
+       shrink_dcache_parent(dentry);
        for (;;) {
-               struct detach_data data;
-
-               data.mountpoint = NULL;
-               INIT_LIST_HEAD(&data.select.dispose);
-               data.select.start = dentry;
-               data.select.found = 0;
-
-               d_walk(dentry, &data, detach_and_collect, check_and_drop);
-
-               if (!list_empty(&data.select.dispose))
-                       shrink_dentry_list(&data.select.dispose);
-               else if (!data.mountpoint)
+               struct dentry *victim = NULL;
+               d_walk(dentry, &victim, find_submount);
+               if (!victim) {
+                       if (had_submounts)
+                               shrink_dcache_parent(dentry);
                        return;
-
-               if (data.mountpoint) {
-                       detach_mounts(data.mountpoint);
-                       dput(data.mountpoint);
                }
+               had_submounts = true;
+               detach_mounts(victim);
+               dput(victim);
        }
 }
 EXPORT_SYMBOL(d_invalidate);
@@ -3112,7 +3078,7 @@ static enum d_walk_ret d_genocide_kill(void *data, struct dentry *dentry)
 
 void d_genocide(struct dentry *parent)
 {
-       d_walk(parent, parent, d_genocide_kill, NULL);
+       d_walk(parent, parent, d_genocide_kill);
 }
 
 EXPORT_SYMBOL(d_genocide);

Reply via email to