On 2020-01-23 09:36, Hans Petter Selasky wrote:
On 2020-01-23 02:24, Gleb Smirnoff wrote:
Author: glebius
Date: Thu Jan 23 01:24:47 2020
New Revision: 357004
URL: https://svnweb.freebsd.org/changeset/base/357004

Log:
   Enter the network epoch for interrupt handlers of INTR_TYPE_NET.
   Provide tunable to limit how many times handlers may be executed
   without reentering epoch.
   Differential Revision:    https://reviews.freebsd.org/D23242

Modified:
   head/sys/kern/kern_intr.c
   head/sys/sys/interrupt.h

Modified: head/sys/kern/kern_intr.c
==============================================================================
--- head/sys/kern/kern_intr.c    Thu Jan 23 01:20:59 2020    (r357003)
+++ head/sys/kern/kern_intr.c    Thu Jan 23 01:24:47 2020    (r357004)
@@ -48,6 +48,7 @@ __FBSDID("$FreeBSD$");
  #include <sys/mutex.h>
  #include <sys/priv.h>
  #include <sys/proc.h>
+#include <sys/epoch.h>
  #include <sys/random.h>
  #include <sys/resourcevar.h>
  #include <sys/sched.h>
@@ -94,6 +95,9 @@ static int intr_storm_threshold = 0;
  SYSCTL_INT(_hw, OID_AUTO, intr_storm_threshold, CTLFLAG_RWTUN,
      &intr_storm_threshold, 0,
      "Number of consecutive interrupts before storm protection is enabled");
+static int intr_epoch_batch = 1000;
+SYSCTL_INT(_hw, OID_AUTO, intr_epoch_batch, CTLFLAG_RWTUN, &intr_epoch_batch,
+    0, "Maximum interrupt handler executions without re-entering epoch(9)");
  static TAILQ_HEAD(, intr_event) event_list =
      TAILQ_HEAD_INITIALIZER(event_list);
  static struct mtx event_lock;
@@ -587,6 +591,8 @@ intr_event_add_handler(struct intr_event *ie, const ch
          ih->ih_flags |= IH_MPSAFE;
      if (flags & INTR_ENTROPY)
          ih->ih_flags |= IH_ENTROPY;
+    if (flags & INTR_TYPE_NET)
+        ih->ih_flags |= IH_NET;

     /* We can only have one exclusive handler in a event. */
      mtx_lock(&ie->ie_lock);
@@ -1196,11 +1202,12 @@ ithread_execute_handlers(struct proc *p, struct intr_e
  static void
  ithread_loop(void *arg)
  {
+    struct epoch_tracker et;
      struct intr_thread *ithd;
      struct intr_event *ie;
      struct thread *td;
      struct proc *p;
-    int wake;
+    int wake, epoch_count;

    td = curthread;
      p = td->td_proc;
@@ -1235,8 +1242,21 @@ ithread_loop(void *arg)
           * that the load of ih_need in ithread_execute_handlers()
           * is ordered after the load of it_need here.
           */
-        while (atomic_cmpset_acq_int(&ithd->it_need, 1, 0) != 0)
+        if (ie->ie_hflags & IH_NET) {
+            epoch_count = 0;
+            NET_EPOCH_ENTER(et);
+        }
+        while (atomic_cmpset_acq_int(&ithd->it_need, 1, 0) != 0) {
              ithread_execute_handlers(p, ie);
+            if ((ie->ie_hflags & IH_NET) &&
+                ++epoch_count >= intr_epoch_batch) {
+                NET_EPOCH_EXIT(et);
+                epoch_count = 0;
+                NET_EPOCH_ENTER(et);
+            }
+        }
+        if (ie->ie_hflags & IH_NET)
+            NET_EPOCH_EXIT(et);
          WITNESS_WARN(WARN_PANIC, NULL, "suspending ithread");
          mtx_assert(&Giant, MA_NOTOWNED);

Hi Gleb,

What you want to do here is right, but how it is implemented is wrong, in my opinion.
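
For context, the driver-visible effect of the change is that a handler registered with INTR_TYPE_NET now runs with its ithread already inside the network epoch, so it can assert the epoch instead of entering it itself. A minimal sketch of such a handler (the foo_* names are illustrative only, not code from the tree):

    static void
    foo_intr(void *arg)
    {
            struct foo_softc *sc = arg;

            NET_EPOCH_ASSERT();     /* ithread_loop() entered the epoch */
            foo_rxeof(sc);          /* epoch-protected traversals are safe */
    }

The batch limit itself is exposed as the hw.intr_epoch_batch sysctl; since it is CTLFLAG_RWTUN, it can be set at runtime or as a tunable from loader.conf.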

1) Remove intr_epoch_batch. Most network drivers use interrupt moderation, and a batch of 1000 iterations can easily add up to 1 second inside the epoch under heavy load! (See the numbers after point 2.)

2) You need to add a new interrupt-request function that takes a pointer to an epoch and replaces that IH_NET in hflags! (Sketched below.)
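
To put numbers on point 1: with moderation, a single pass through ithread_execute_handlers() can drain a full moderated batch and take on the order of a millisecond under load, so 1000 passes can keep the epoch tracker held for roughly a second, and epoch(9) grace periods cannot complete for that whole time.

Roughly what point 2 asks for, as a sketch only (the function name and the ih_epoch field are hypothetical, nothing like this exists in the tree today):

    /* Registration variant where the driver names its epoch. */
    int
    intr_event_add_handler_epoch(struct intr_event *ie, const char *name,
        driver_filter_t filter, driver_intr_t handler, void *arg, u_char pri,
        enum intr_type flags, epoch_t epoch, void **cookiep);

    /*
     * ithread_execute_handlers() would then bracket each handler with the
     * epoch it registered, instead of keying off IH_NET:
     */
            struct epoch_tracker et;

            if (ih->ih_epoch != NULL)
                    epoch_enter_preempt(ih->ih_epoch, &et);
            ih->ih_handler(ih->ih_argument);
            if (ih->ih_epoch != NULL)
                    epoch_exit_preempt(ih->ih_epoch, &et);

With the enter/exit tied to the handler that actually needs it, the epoch is dropped between executions anyway, and no batch counter is required at all.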

--HPS