In the RDE the poll loop needs to know whether any additional work is pending.
This is done by calling various check functions; if any of them reports
pending work, the poll timeout is set to 0.
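
For illustration, a minimal sketch of that pattern outside the actual
rde_main() loop (work_pending() and queued_work are placeholders, not
bgpd code):

#include <poll.h>

/* placeholder for the various *_pending() checks */
static long	queued_work;

static int
work_pending(void)
{
	return queued_work != 0;
}

static void
event_loop(struct pollfd *pfd, nfds_t nfds)
{
	int	timeout;

	for (;;) {
		/* block indefinitely unless work is already queued */
		timeout = work_pending() ? 0 : -1;

		if (poll(pfd, nfds, timeout) == -1)
			continue;	/* real code checks for EINTR etc. */

		/* handle ready descriptors, then process the queued work */
	}
}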

Now some of these functions trigger more often than others, so it is best
to order the checks accordingly. Check for incoming and outgoing updates
first (these are most frequently true), then nexthop_pending() since it is
cheap, and finally rib_dump_pending().

Also try to make these functions as cheap as possible. In the case of
peer_imsg_pending() this can be done with a simple imsg_pending counter,
so there is no need to loop over all peers.
Similar changes may be possible for the other checks, but they are a bit
more complicated (apart from nexthop_pending(), which is already minimal).
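
For illustration only, a stripped-down sketch of the counter idea using a
hypothetical item queue (not the bgpd data structures): bump the counter on
every insert, drop it on every removal (including flush), and the pending
check becomes a constant-time comparison.

#include <sys/queue.h>

struct item {
	SIMPLEQ_ENTRY(item)	entry;
};
SIMPLEQ_HEAD(itemq, item);

/* single counter covering all queues, kept in sync on insert/remove */
static long	items_pending;

static void
item_push(struct itemq *q, struct item *it)
{
	SIMPLEQ_INSERT_TAIL(q, it, entry);
	items_pending++;
}

static struct item *
item_pop(struct itemq *q)
{
	struct item	*it;

	if ((it = SIMPLEQ_FIRST(q)) == NULL)
		return NULL;
	SIMPLEQ_REMOVE_HEAD(q, entry);
	items_pending--;
	return it;
}

/* O(1) check, no need to walk every queue */
static int
items_queued(void)
{
	return items_pending != 0;
}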

-- 
:wq Claudio

Index: rde.c
===================================================================
RCS file: /cvs/src/usr.sbin/bgpd/rde.c,v
retrieving revision 1.586
diff -u -p -r1.586 rde.c
--- rde.c       16 Jan 2023 10:37:08 -0000      1.586
+++ rde.c       18 Jan 2023 10:50:54 -0000
@@ -248,8 +248,8 @@ rde_main(int debug, int verbose)
                        }
                }
 
-               if (rib_dump_pending() || rde_update_queue_pending() ||
-                   nexthop_pending() || peer_imsg_pending())
+               if (peer_imsg_pending() || rde_update_queue_pending() ||
+                   nexthop_pending() || rib_dump_pending())
                        timeout = 0;
 
                if (poll(pfd, i, timeout) == -1) {
Index: rde_peer.c
===================================================================
RCS file: /cvs/src/usr.sbin/bgpd/rde_peer.c,v
retrieving revision 1.25
diff -u -p -r1.25 rde_peer.c
--- rde_peer.c  23 Sep 2022 15:49:20 -0000      1.25
+++ rde_peer.c  18 Jan 2023 10:51:57 -0000
@@ -28,6 +28,7 @@
 
 struct peer_tree        peertable;
 struct rde_peer                *peerself;
+static long             imsg_pending;
 
 CTASSERT(sizeof(peerself->recv_eor) * 8 > AID_MAX);
 CTASSERT(sizeof(peerself->sent_eor) * 8 > AID_MAX);
@@ -610,6 +611,7 @@ peer_imsg_push(struct rde_peer *peer, st
                fatal(NULL);
        imsg_move(&iq->imsg, imsg);
        SIMPLEQ_INSERT_TAIL(&peer->imsg_queue, iq, entry);
+       imsg_pending++;
 }
 
 /*
@@ -629,29 +631,18 @@ peer_imsg_pop(struct rde_peer *peer, str
 
        SIMPLEQ_REMOVE_HEAD(&peer->imsg_queue, entry);
        free(iq);
+       imsg_pending--;
 
        return 1;
 }
 
-static void
-peer_imsg_queued(struct rde_peer *peer, void *arg)
-{
-       int *p = arg;
-
-       *p = *p || !SIMPLEQ_EMPTY(&peer->imsg_queue);
-}
-
 /*
  * Check if any imsg are pending, return 0 if none are pending
  */
 int
 peer_imsg_pending(void)
 {
-       int pending = 0;
-
-       peer_foreach(peer_imsg_queued, &pending);
-
-       return pending;
+       return imsg_pending != 0;
 }
 
 /*
@@ -665,5 +656,6 @@ peer_imsg_flush(struct rde_peer *peer)
        while ((iq = SIMPLEQ_FIRST(&peer->imsg_queue)) != NULL) {
                SIMPLEQ_REMOVE_HEAD(&peer->imsg_queue, entry);
                free(iq);
+               imsg_pending--;
        }
 }
