The branch main has been updated by rscheff:

URL: 
https://cgit.FreeBSD.org/src/commit/?id=529a2a0f2765f6c57c50a5af6be242c03bf714e3

commit 529a2a0f2765f6c57c50a5af6be242c03bf714e3
Author:     Richard Scheffenegger <rscheff@freebsd.org>
AuthorDate: 2021-04-01 08:00:32 +0000
Commit:     Richard Scheffenegger <rscheff@freebsd.org>
CommitDate: 2021-04-01 08:03:30 +0000

    tcp: For hostcache performance, use atomics instead of counters
    
    As accessing the tcp hostcache happens frequently on some
    classes of servers, it was recommended to use atomic_add/subtract
    rather than (per-CPU distributed) counters, which have to be
    summed up at high cost to cache efficiency.
    
    PR: 254333
    MFC after: 2 weeks
    Sponsored by: NetApp, Inc.
    Reviewed By: #transport, tuexen, jtl
    Differential Revision: https://reviews.freebsd.org/D29522
---
 sys/netinet/tcp_hostcache.c | 24 +++++++++++-------------
 sys/netinet/tcp_hostcache.h |  2 +-
 2 files changed, 12 insertions(+), 14 deletions(-)

diff --git a/sys/netinet/tcp_hostcache.c b/sys/netinet/tcp_hostcache.c
index 67da97405c3f..87023cc1a760 100644
--- a/sys/netinet/tcp_hostcache.c
+++ b/sys/netinet/tcp_hostcache.c
@@ -147,8 +147,8 @@ SYSCTL_UINT(_net_inet_tcp_hostcache, OID_AUTO, bucketlimit,
     CTLFLAG_VNET | CTLFLAG_RDTUN, &VNET_NAME(tcp_hostcache.bucket_limit), 0,
     "Per-bucket hash limit for hostcache");
 
-SYSCTL_COUNTER_U64(_net_inet_tcp_hostcache, OID_AUTO, count, CTLFLAG_VNET | CTLFLAG_RD,
-     &VNET_NAME(tcp_hostcache.cache_count),
+SYSCTL_UINT(_net_inet_tcp_hostcache, OID_AUTO, count, CTLFLAG_VNET | CTLFLAG_RD,
+     &VNET_NAME(tcp_hostcache.cache_count), 0,
     "Current number of entries in hostcache");
 
 SYSCTL_INT(_net_inet_tcp_hostcache, OID_AUTO, expire, CTLFLAG_VNET | CTLFLAG_RW,
@@ -199,8 +199,7 @@ tcp_hc_init(void)
        /*
         * Initialize hostcache structures.
         */
-       V_tcp_hostcache.cache_count = counter_u64_alloc(M_WAITOK);
-       counter_u64_zero(V_tcp_hostcache.cache_count);
+       atomic_store_int(&V_tcp_hostcache.cache_count, 0);
        V_tcp_hostcache.hashsize = TCP_HOSTCACHE_HASHSIZE;
        V_tcp_hostcache.bucket_limit = TCP_HOSTCACHE_BUCKETLIMIT;
        V_tcp_hostcache.expire = TCP_HOSTCACHE_EXPIRE;
@@ -268,9 +267,6 @@ tcp_hc_destroy(void)
        /* Purge all hc entries. */
        tcp_hc_purge_internal(1);
 
-       /* Release the counter */
-       counter_u64_free(V_tcp_hostcache.cache_count);
-
        /* Free the uma zone and the allocated hash table. */
        uma_zdestroy(V_tcp_hostcache.zone);
 
@@ -378,7 +374,7 @@ tcp_hc_insert(struct in_conninfo *inc)
         * If the bucket limit is reached, reuse the least-used element.
         */
        if (hc_head->hch_length >= V_tcp_hostcache.bucket_limit ||
-           counter_u64_fetch(V_tcp_hostcache.cache_count) >= V_tcp_hostcache.cache_limit) {
+           atomic_load_int(&V_tcp_hostcache.cache_count) >= V_tcp_hostcache.cache_limit) {
                hc_entry = TAILQ_LAST(&hc_head->hch_bucket, hc_qhead);
                /*
                 * At first we were dropping the last element, just to
@@ -395,7 +391,7 @@ tcp_hc_insert(struct in_conninfo *inc)
                }
                TAILQ_REMOVE(&hc_head->hch_bucket, hc_entry, rmx_q);
                V_tcp_hostcache.hashbase[hash].hch_length--;
-               counter_u64_add(V_tcp_hostcache.cache_count, -1);
+               atomic_subtract_int(&V_tcp_hostcache.cache_count, 1);
                TCPSTAT_INC(tcps_hc_bucketoverflow);
 #if 0
                uma_zfree(V_tcp_hostcache.zone, hc_entry);
@@ -428,7 +424,7 @@ tcp_hc_insert(struct in_conninfo *inc)
         */
        TAILQ_INSERT_HEAD(&hc_head->hch_bucket, hc_entry, rmx_q);
        V_tcp_hostcache.hashbase[hash].hch_length++;
-       counter_u64_add(V_tcp_hostcache.cache_count, 1);
+       atomic_add_int(&V_tcp_hostcache.cache_count, 1);
        TCPSTAT_INC(tcps_hc_added);
 
        return hc_entry;
@@ -644,7 +640,8 @@ sysctl_tcp_hc_list(SYSCTL_HANDLER_ARGS)
 
        /* Optimize Buffer length query by sbin/sysctl */
        if (req->oldptr == NULL) {
-               len = (counter_u64_fetch(V_tcp_hostcache.cache_count) + 1) * linesize;
+               len = (atomic_load_int(&V_tcp_hostcache.cache_count) + 1) *
+                       linesize;
                return (SYSCTL_OUT(req, NULL, len));
        }
 
@@ -654,7 +651,8 @@ sysctl_tcp_hc_list(SYSCTL_HANDLER_ARGS)
        }
 
        /* Use a buffer sized for one full bucket */
-       sbuf_new_for_sysctl(&sb, NULL, V_tcp_hostcache.bucket_limit * linesize, req);
+       sbuf_new_for_sysctl(&sb, NULL, V_tcp_hostcache.bucket_limit *
+               linesize, req);
 
        sbuf_printf(&sb,
                "\nIP address        MTU  SSTRESH      RTT   RTTVAR "
@@ -716,7 +714,7 @@ tcp_hc_purge_internal(int all)
                                              hc_entry, rmx_q);
                                uma_zfree(V_tcp_hostcache.zone, hc_entry);
                                V_tcp_hostcache.hashbase[i].hch_length--;
-                               counter_u64_add(V_tcp_hostcache.cache_count, -1);
+                               atomic_subtract_int(&V_tcp_hostcache.cache_count, 1);
                        } else
                                hc_entry->rmx_expire -= V_tcp_hostcache.prune;
                }
diff --git a/sys/netinet/tcp_hostcache.h b/sys/netinet/tcp_hostcache.h
index b5237392acc2..2f7035c0c6af 100644
--- a/sys/netinet/tcp_hostcache.h
+++ b/sys/netinet/tcp_hostcache.h
@@ -74,7 +74,7 @@ struct tcp_hostcache {
        u_int           hashsize;
        u_int           hashmask;
        u_int           bucket_limit;
-       counter_u64_t   cache_count;
+       u_int           cache_count;
        u_int           cache_limit;
        int             expire;
        int             prune;
_______________________________________________
dev-commits-src-all@freebsd.org mailing list
https://lists.freebsd.org/mailman/listinfo/dev-commits-src-all
To unsubscribe, send any mail to "dev-commits-src-all-unsubscribe@freebsd.org"

Reply via email to