The branch releng/13.0 has been updated by kib:

URL: 
https://cgit.FreeBSD.org/src/commit/?id=eadca59970c1ad89d79991423488adc3cd113f1f

commit eadca59970c1ad89d79991423488adc3cd113f1f
Author:     Konstantin Belousov <k...@freebsd.org>
AuthorDate: 2021-01-31 18:39:49 +0000
Commit:     Konstantin Belousov <k...@freebsd.org>
CommitDate: 2021-02-25 20:50:22 +0000

    ffs softdep: Force processing of VI_OWEINACT vnodes when there is inode shortage
    
    Approved by:    re (delphij, gjb)
    
    (cherry picked from commit 28703d27130c9cb7e7830ff53155c379a502c248)
---
 sys/ufs/ffs/ffs_softdep.c | 61 +++++++++++++++++++++++++++++++++++++++++++++++
 sys/ufs/ffs/softdep.h     |  2 ++
 2 files changed, 63 insertions(+)

diff --git a/sys/ufs/ffs/ffs_softdep.c b/sys/ufs/ffs/ffs_softdep.c
index 8c52139687f9..786fb43c7d81 100644
--- a/sys/ufs/ffs/ffs_softdep.c
+++ b/sys/ufs/ffs/ffs_softdep.c
@@ -1311,6 +1311,7 @@ static int softdep_flushcache = 0; /* Should we do BIO_FLUSH? */
  */
 static int stat_flush_threads; /* number of softdep flushing threads */
 static int stat_worklist_push; /* number of worklist cleanups */
+static int stat_delayed_inact; /* number of delayed inactivation cleanups */
 static int stat_blk_limit_push;        /* number of times block limit neared */
 static int stat_ino_limit_push;        /* number of times inode limit neared */
 static int stat_blk_limit_hit; /* number of times block slowdown imposed */
@@ -1344,6 +1345,8 @@ SYSCTL_INT(_debug_softdep, OID_AUTO, flush_threads, CTLFLAG_RD,
     &stat_flush_threads, 0, "");
 SYSCTL_INT(_debug_softdep, OID_AUTO, worklist_push,
     CTLFLAG_RW | CTLFLAG_STATS, &stat_worklist_push, 0,"");
+SYSCTL_INT(_debug_softdep, OID_AUTO, delayed_inactivations, CTLFLAG_RD,
+    &stat_delayed_inact, 0, "");
 SYSCTL_INT(_debug_softdep, OID_AUTO, blk_limit_push,
     CTLFLAG_RW | CTLFLAG_STATS, &stat_blk_limit_push, 0,"");
 SYSCTL_INT(_debug_softdep, OID_AUTO, ino_limit_push,
@@ -13707,6 +13710,37 @@ softdep_slowdown(vp)
        return (1);
 }
 
+static int
+softdep_request_cleanup_filter(struct vnode *vp, void *arg __unused)
+{
+       return ((vp->v_iflag & VI_OWEINACT) != 0 && vp->v_usecount == 0 &&
+           ((vp->v_vflag & VV_NOSYNC) != 0 || VTOI(vp)->i_effnlink == 0));
+}
+
+static void
+softdep_request_cleanup_inactivate(struct mount *mp)
+{
+       struct vnode *vp, *mvp;
+       int error;
+
+       MNT_VNODE_FOREACH_LAZY(vp, mp, mvp, softdep_request_cleanup_filter,
+           NULL) {
+               vholdl(vp);
+               vn_lock(vp, LK_EXCLUSIVE | LK_INTERLOCK | LK_RETRY);
+               VI_LOCK(vp);
+               if (vp->v_data != NULL && vp->v_usecount == 0) {
+                       while ((vp->v_iflag & VI_OWEINACT) != 0) {
+                               error = vinactive(vp);
+                               if (error != 0 && error != ERELOOKUP)
+                                       break;
+                       }
+                       atomic_add_int(&stat_delayed_inact, 1);
+               }
+               VOP_UNLOCK(vp);
+               vdropl(vp);
+       }
+}
+
 /*
  * Called by the allocation routines when they are about to fail
  * in the hope that we can free up the requested resource (inodes
@@ -13819,6 +13853,33 @@ retry:
                        stat_worklist_push += 1;
                FREE_LOCK(ump);
        }
+
+       /*
+        * Check that there are vnodes pending inactivation.  As they
+        * have been unlinked, inactivating them will free up their
+        * inodes.
+        */
+       ACQUIRE_LOCK(ump);
+       if (resource == FLUSH_INODES_WAIT &&
+           fs->fs_cstotal.cs_nifree <= needed &&
+           fs->fs_pendinginodes <= needed) {
+               if ((ump->um_softdep->sd_flags & FLUSH_DI_ACTIVE) == 0) {
+                       ump->um_softdep->sd_flags |= FLUSH_DI_ACTIVE;
+                       FREE_LOCK(ump);
+                       softdep_request_cleanup_inactivate(mp);
+                       ACQUIRE_LOCK(ump);
+                       ump->um_softdep->sd_flags &= ~FLUSH_DI_ACTIVE;
+                       wakeup(&ump->um_softdep->sd_flags);
+               } else {
+                       while ((ump->um_softdep->sd_flags &
+                           FLUSH_DI_ACTIVE) != 0) {
+                               msleep(&ump->um_softdep->sd_flags,
+                                   LOCK_PTR(ump), PVM, "ffsvina", hz);
+                       }
+               }
+       }
+       FREE_LOCK(ump);
+
        /*
         * If we still need resources and there are no more worklist
         * entries to process to obtain them, we have to start flushing
diff --git a/sys/ufs/ffs/softdep.h b/sys/ufs/ffs/softdep.h
index 868ada00f2dc..3493aadafc98 100644
--- a/sys/ufs/ffs/softdep.h
+++ b/sys/ufs/ffs/softdep.h
@@ -1086,6 +1086,8 @@ struct mount_softdeps {
 #define FLUSH_CLEANUP  0x0002  /* need to clear out softdep structures */
 #define        FLUSH_STARTING  0x0004  /* flush thread not yet started */
 #define        FLUSH_RC_ACTIVE 0x0008  /* a thread is flushing the mount point */
+#define        FLUSH_DI_ACTIVE 0x0010  /* a thread is processing delayed
+                                  inactivations */
 
 /*
  * Keep the old names from when these were in the ufsmount structure.
_______________________________________________
dev-commits-src-all@freebsd.org mailing list
https://lists.freebsd.org/mailman/listinfo/dev-commits-src-all
To unsubscribe, send any mail to "dev-commits-src-all-unsubscr...@freebsd.org"

Reply via email to