Make size of Tx and Rx rings configurable

The required event queue size is now calculated from the configured ring sizes.

Submitted by:   Andrew Rybchenko <arybchenko at solarflare.com>
Sponsored by:   Solarflare Communications, Inc.
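
The ring sizes are set via loader tunables. As a purely illustrative
example (the tunable names come from the sfxge.4 hunk below; the values
are arbitrary supported ones), /boot/loader.conf could select the largest
rings like this:

    hw.sfxge.rx_ring="4096"
    hw.sfxge.tx_ring="4096"

Each value must be a power of 2 between 512 and 4096; anything else makes
device attach fail with EINVAL, as the validation in the sfxge.c hunk
below shows.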

diff -r abfcbc1ff6b9 share/man/man4/sfxge.4
--- a/share/man/man4/sfxge.4    Thu Sep 25 15:59:36 2014 +0400
+++ b/share/man/man4/sfxge.4    Thu Sep 25 16:06:37 2014 +0400
@@ -76,6 +76,32 @@
 .Nm
 driver supports all 10Gb Ethernet adapters based on Solarflare SFC9000
 family controllers.
+.Sh LOADER TUNABLES
+Tunables can be set at the
+.Xr loader 8
+prompt before booting the kernel or stored in
+.Xr loader.conf 5 .
+Actual values can be obtained using
+.Xr sysctl 8 .
+.Bl -tag -width indent
+.It Va hw.sfxge.rx_ring
+Maximum number of descriptors in a receive queue ring.
+Supported values are: 512, 1024, 2048 and 4096.
+.It Va hw.sfxge.tx_ring
+Maximum number of descriptors in a transmit queue ring.
+Supported values are: 512, 1024, 2048 and 4096.
+.It Va hw.sfxge.tx_dpl_get_max
+The maximum length of the deferred packet 'get-list' for queued transmit
+packets, used only if the transmit queue lock can be acquired.
+If a packet is dropped, the \fItx_early_drops\fR counter is incremented and
+the local sender gets an ENOBUFS error.
+Value must be greater than 0.
+.It Va hw.sfxge.tx_dpl_put_max
+The maximum length of the deferred packet 'put-list' for queued transmit
+packets, used if the transmit queue lock cannot be acquired.
+If a packet is dropped, the \fItx_early_drops\fR counter is incremented and
+the local sender gets an ENOBUFS error.
+Value must be greater than or equal to 0.
 .Sh SUPPORT
 For general information and support,
 go to the Solarflare support website at:
diff -r abfcbc1ff6b9 sys/dev/sfxge/sfxge.c
--- a/sys/dev/sfxge/sfxge.c     Thu Sep 25 15:59:36 2014 +0400
+++ b/sys/dev/sfxge/sfxge.c     Thu Sep 25 16:06:37 2014 +0400
@@ -42,6 +42,7 @@
 #include <sys/taskqueue.h>
 #include <sys/sockio.h>
 #include <sys/sysctl.h>
+#include <sys/syslog.h>
 
 #include <dev/pci/pcireg.h>
 #include <dev/pci/pcivar.h>
@@ -67,6 +68,25 @@
 
 MALLOC_DEFINE(M_SFXGE, "sfxge", "Solarflare 10GigE driver");
 
+
+SYSCTL_NODE(_hw, OID_AUTO, sfxge, CTLFLAG_RD, 0,
+           "SFXGE driver parameters");
+
+#define        SFXGE_PARAM_RX_RING     SFXGE_PARAM(rx_ring)
+static int sfxge_rx_ring_entries = SFXGE_NDESCS;
+TUNABLE_INT(SFXGE_PARAM_RX_RING, &sfxge_rx_ring_entries);
+SYSCTL_INT(_hw_sfxge, OID_AUTO, rx_ring, CTLFLAG_RDTUN,
+          &sfxge_rx_ring_entries, 0,
+          "Maximum number of descriptors in a receive ring");
+
+#define        SFXGE_PARAM_TX_RING     SFXGE_PARAM(tx_ring)
+static int sfxge_tx_ring_entries = SFXGE_NDESCS;
+TUNABLE_INT(SFXGE_PARAM_TX_RING, &sfxge_tx_ring_entries);
+SYSCTL_INT(_hw_sfxge, OID_AUTO, tx_ring, CTLFLAG_RDTUN,
+          &sfxge_tx_ring_entries, 0,
+          "Maximum number of descriptors in a transmit ring");
+
+
 static void
 sfxge_reset(void *arg, int npending);
 
@@ -314,8 +334,8 @@
        ifp->if_qflush = sfxge_if_qflush;
 #else
        ifp->if_start = sfxge_if_start;
-       IFQ_SET_MAXLEN(&ifp->if_snd, SFXGE_NDESCS - 1);
-       ifp->if_snd.ifq_drv_maxlen = SFXGE_NDESCS - 1;
+       IFQ_SET_MAXLEN(&ifp->if_snd, sc->txq_entries - 1);
+       ifp->if_snd.ifq_drv_maxlen = sc->txq_entries - 1;
        IFQ_SET_READY(&ifp->if_snd);
 
        mtx_init(&sc->tx_lock, "txq", NULL, MTX_DEF);
@@ -414,6 +434,26 @@
                goto fail3;
        sc->enp = enp;
 
+       if (!ISP2(sfxge_rx_ring_entries) ||
+           !(sfxge_rx_ring_entries & EFX_RXQ_NDESCS_MASK)) {
+               log(LOG_ERR, "%s=%d must be power of 2 from %u to %u",
+                   SFXGE_PARAM_RX_RING, sfxge_rx_ring_entries,
+                   EFX_RXQ_MINNDESCS, EFX_RXQ_MAXNDESCS);
+               error = EINVAL;
+               goto fail_rx_ring_entries;
+       }
+       sc->rxq_entries = sfxge_rx_ring_entries;
+
+       if (!ISP2(sfxge_tx_ring_entries) ||
+           !(sfxge_tx_ring_entries & EFX_TXQ_NDESCS_MASK)) {
+               log(LOG_ERR, "%s=%d must be power of 2 from %u to %u",
+                   SFXGE_PARAM_TX_RING, sfxge_tx_ring_entries,
+                   EFX_TXQ_MINNDESCS, EFX_TXQ_MAXNDESCS);
+               error = EINVAL;
+               goto fail_tx_ring_entries;
+       }
+       sc->txq_entries = sfxge_tx_ring_entries;
+
        /* Initialize MCDI to talk to the microcontroller. */
        if ((error = sfxge_mcdi_init(sc)) != 0)
                goto fail4;
@@ -486,6 +526,8 @@
        sfxge_mcdi_fini(sc);
 
 fail4:
+fail_tx_ring_entries:
+fail_rx_ring_entries:
        sc->enp = NULL;
        efx_nic_destroy(enp);
        mtx_destroy(&sc->enp_lock);
diff -r abfcbc1ff6b9 sys/dev/sfxge/sfxge.h
--- a/sys/dev/sfxge/sfxge.h     Thu Sep 25 15:59:36 2014 +0400
+++ b/sys/dev/sfxge/sfxge.h     Thu Sep 25 16:06:37 2014 +0400
@@ -87,6 +87,8 @@
 #include "sfxge_rx.h"
 #include "sfxge_tx.h"
 
+#define        ROUNDUP_POW_OF_TWO(_n)  (1ULL << flsl((_n) - 1))
+
 #define        SFXGE_IP_ALIGN  2
 
 #define        SFXGE_ETHERTYPE_LOOPBACK        0x9000  /* Xerox loopback */
@@ -106,6 +108,7 @@
 
        enum sfxge_evq_state    init_state;
        unsigned int            index;
+       unsigned int            entries;
        efsys_mem_t             mem;
        unsigned int            buf_base_id;
 
@@ -121,7 +124,6 @@
        struct sfxge_txq        **txqs;
 };
 
-#define        SFXGE_NEVS      4096
 #define        SFXGE_NDESCS    1024
 #define        SFXGE_MODERATION        30
 
@@ -209,6 +211,9 @@
        efx_nic_t                       *enp;
        struct mtx                      enp_lock;
 
+       unsigned int                    rxq_entries;
+       unsigned int                    txq_entries;
+
        bus_dma_tag_t                   parent_dma_tag;
        efsys_bar_t                     bar;
 
@@ -246,6 +251,10 @@
 #define        SFXGE_LINK_UP(sc) ((sc)->port.link_mode != EFX_LINK_DOWN)
 #define        SFXGE_RUNNING(sc) ((sc)->ifnet->if_drv_flags & IFF_DRV_RUNNING)
 
+#define        SFXGE_PARAM(_name)      "hw.sfxge." #_name
+
+SYSCTL_DECL(_hw_sfxge);
+
 /*
  * From sfxge.c.
  */
diff -r abfcbc1ff6b9 sys/dev/sfxge/sfxge_ev.c
--- a/sys/dev/sfxge/sfxge_ev.c  Thu Sep 25 15:59:36 2014 +0400
+++ b/sys/dev/sfxge/sfxge_ev.c  Thu Sep 25 16:06:37 2014 +0400
@@ -102,7 +102,7 @@
        if (rxq->init_state != SFXGE_RXQ_STARTED)
                goto done;
 
-       expected = rxq->pending++ & (SFXGE_NDESCS - 1);
+       expected = rxq->pending++ & rxq->ptr_mask;
        if (id != expected) {
                evq->exception = B_TRUE;
 
@@ -247,10 +247,10 @@
        if (txq->init_state != SFXGE_TXQ_STARTED)
                goto done;
 
-       stop = (id + 1) & (SFXGE_NDESCS - 1);
-       id = txq->pending & (SFXGE_NDESCS - 1);
+       stop = (id + 1) & txq->ptr_mask;
+       id = txq->pending & txq->ptr_mask;
 
-       delta = (stop >= id) ? (stop - id) : (SFXGE_NDESCS - id + stop);
+       delta = (stop >= id) ? (stop - id) : (txq->entries - id + stop);
        txq->pending += delta;
 
        evq->tx_done++;
@@ -635,7 +635,7 @@
 
        efx_ev_qdestroy(evq->common);
        efx_sram_buf_tbl_clear(sc->enp, evq->buf_base_id,
-           EFX_EVQ_NBUFS(SFXGE_NEVS));
+           EFX_EVQ_NBUFS(evq->entries));
        mtx_unlock(&evq->lock);
 }
 
@@ -654,15 +654,15 @@
            ("evq->init_state != SFXGE_EVQ_INITIALIZED"));
 
        /* Clear all events. */
-       (void)memset(esmp->esm_base, 0xff, EFX_EVQ_SIZE(SFXGE_NEVS));
+       (void)memset(esmp->esm_base, 0xff, EFX_EVQ_SIZE(evq->entries));
 
        /* Program the buffer table. */
        if ((rc = efx_sram_buf_tbl_set(sc->enp, evq->buf_base_id, esmp,
-           EFX_EVQ_NBUFS(SFXGE_NEVS))) != 0)
-               return rc;
+           EFX_EVQ_NBUFS(evq->entries))) != 0)
+               return (rc);
 
        /* Create the common code event queue. */
-       if ((rc = efx_ev_qcreate(sc->enp, index, esmp, SFXGE_NEVS,
+       if ((rc = efx_ev_qcreate(sc->enp, index, esmp, evq->entries,
            evq->buf_base_id, &evq->common)) != 0)
                goto fail;
 
@@ -705,7 +705,7 @@
        efx_ev_qdestroy(evq->common);
 fail:
        efx_sram_buf_tbl_clear(sc->enp, evq->buf_base_id,
-           EFX_EVQ_NBUFS(SFXGE_NEVS));
+           EFX_EVQ_NBUFS(evq->entries));
 
        return (rc);
 }
@@ -802,15 +802,31 @@
        sc->evq[index] = evq;
        esmp = &evq->mem;
 
+       /* Build an event queue with room for one event per Tx and Rx buffer,
+        * plus some extra for link state events and MCDI completions.
+        * There are three Tx queues in the first event queue and one in
+        * each of the others.
+        */
+       if (index == 0)
+               evq->entries =
+                       ROUNDUP_POW_OF_TWO(sc->rxq_entries +
+                                          3 * sc->txq_entries +
+                                          128);
+       else
+               evq->entries =
+                       ROUNDUP_POW_OF_TWO(sc->rxq_entries +
+                                          sc->txq_entries +
+                                          128);
+
        /* Initialise TX completion list */
        evq->txqs = &evq->txq;
 
        /* Allocate DMA space. */
-       if ((rc = sfxge_dma_alloc(sc, EFX_EVQ_SIZE(SFXGE_NEVS), esmp)) != 0)
+       if ((rc = sfxge_dma_alloc(sc, EFX_EVQ_SIZE(evq->entries), esmp)) != 0)
                return (rc);
 
        /* Allocate buffer table entries. */
-       sfxge_sram_buf_tbl_alloc(sc, EFX_EVQ_NBUFS(SFXGE_NEVS),
+       sfxge_sram_buf_tbl_alloc(sc, EFX_EVQ_NBUFS(evq->entries),
                                 &evq->buf_base_id);
 
        mtx_init(&evq->lock, "evq", NULL, MTX_DEF);
diff -r abfcbc1ff6b9 sys/dev/sfxge/sfxge_rx.c
--- a/sys/dev/sfxge/sfxge_rx.c  Thu Sep 25 15:59:36 2014 +0400
+++ b/sys/dev/sfxge/sfxge_rx.c  Thu Sep 25 16:06:37 2014 +0400
@@ -54,8 +54,7 @@
 #include "sfxge.h"
 #include "sfxge_rx.h"
 
-#define        RX_REFILL_THRESHOLD     (EFX_RXQ_LIMIT(SFXGE_NDESCS) * 9 / 10)
-#define        RX_REFILL_THRESHOLD_2   (RX_REFILL_THRESHOLD / 2)
+#define        RX_REFILL_THRESHOLD(_entries)   (EFX_RXQ_LIMIT(_entries) * 9 / 10)
 
 /* Size of the LRO hash table.  Must be a power of 2.  A larger table
  * means we can accelerate a larger number of streams.
@@ -214,11 +213,11 @@
                return;
 
        rxfill = rxq->added - rxq->completed;
-       KASSERT(rxfill <= EFX_RXQ_LIMIT(SFXGE_NDESCS),
-           ("rxfill > EFX_RXQ_LIMIT(SFXGE_NDESCS)"));
-       ntodo = min(EFX_RXQ_LIMIT(SFXGE_NDESCS) - rxfill, target);
-       KASSERT(ntodo <= EFX_RXQ_LIMIT(SFXGE_NDESCS),
-           ("ntodo > EFX_RQX_LIMIT(SFXGE_NDESCS)"));
+       KASSERT(rxfill <= EFX_RXQ_LIMIT(rxq->entries),
+           ("rxfill > EFX_RXQ_LIMIT(rxq->entries)"));
+       ntodo = min(EFX_RXQ_LIMIT(rxq->entries) - rxfill, target);
+       KASSERT(ntodo <= EFX_RXQ_LIMIT(rxq->entries),
+           ("ntodo > EFX_RXQ_LIMIT(rxq->entries)"));
 
        if (ntodo == 0)
                return;
@@ -231,7 +230,7 @@
                bus_dma_segment_t seg;
                struct mbuf *m;
 
-               id = (rxq->added + batch) & (SFXGE_NDESCS - 1);
+               id = (rxq->added + batch) & rxq->ptr_mask;
                rx_desc = &rxq->queue[id];
                KASSERT(rx_desc->mbuf == NULL, ("rx_desc->mbuf != NULL"));
 
@@ -274,7 +273,7 @@
                return;
 
        /* Make sure the queue is full */
-       sfxge_rx_qfill(rxq, EFX_RXQ_LIMIT(SFXGE_NDESCS), B_TRUE);
+       sfxge_rx_qfill(rxq, EFX_RXQ_LIMIT(rxq->entries), B_TRUE);
 }
 
 static void __sfxge_rx_deliver(struct sfxge_softc *sc, struct mbuf *m)
@@ -757,7 +756,7 @@
                unsigned int id;
                struct sfxge_rx_sw_desc *rx_desc;
 
-               id = completed++ & (SFXGE_NDESCS - 1);
+               id = completed++ & rxq->ptr_mask;
                rx_desc = &rxq->queue[id];
                m = rx_desc->mbuf;
 
@@ -821,8 +820,8 @@
                sfxge_lro_end_of_burst(rxq);
 
        /* Top up the queue if necessary */
-       if (level < RX_REFILL_THRESHOLD)
-               sfxge_rx_qfill(rxq, EFX_RXQ_LIMIT(SFXGE_NDESCS), B_FALSE);
+       if (level < rxq->refill_threshold)
+               sfxge_rx_qfill(rxq, EFX_RXQ_LIMIT(rxq->entries), B_FALSE);
 }
 
 static void
@@ -884,7 +883,7 @@
        efx_rx_qdestroy(rxq->common);
 
        efx_sram_buf_tbl_clear(sc->enp, rxq->buf_base_id,
-           EFX_RXQ_NBUFS(SFXGE_NDESCS));
+           EFX_RXQ_NBUFS(sc->rxq_entries));
 
        mtx_unlock(&evq->lock);
 }
@@ -908,12 +907,12 @@
 
        /* Program the buffer table. */
        if ((rc = efx_sram_buf_tbl_set(sc->enp, rxq->buf_base_id, esmp,
-           EFX_RXQ_NBUFS(SFXGE_NDESCS))) != 0)
-               return rc;
+           EFX_RXQ_NBUFS(sc->rxq_entries))) != 0)
+               return (rc);
 
        /* Create the common code receive queue. */
        if ((rc = efx_rx_qcreate(sc->enp, index, index, EFX_RXQ_TYPE_DEFAULT,
-           esmp, SFXGE_NDESCS, rxq->buf_base_id, evq->common,
+           esmp, sc->rxq_entries, rxq->buf_base_id, evq->common,
            &rxq->common)) != 0)
                goto fail;
 
@@ -925,7 +924,7 @@
        rxq->init_state = SFXGE_RXQ_STARTED;
 
        /* Try to fill the queue from the pool. */
-       sfxge_rx_qfill(rxq, EFX_RXQ_LIMIT(SFXGE_NDESCS), B_FALSE);
+       sfxge_rx_qfill(rxq, EFX_RXQ_LIMIT(sc->rxq_entries), B_FALSE);
 
        mtx_unlock(&evq->lock);
 
@@ -933,8 +932,8 @@
 
 fail:
        efx_sram_buf_tbl_clear(sc->enp, rxq->buf_base_id,
-           EFX_RXQ_NBUFS(SFXGE_NDESCS));
-       return rc;
+           EFX_RXQ_NBUFS(sc->rxq_entries));
+       return (rc);
 }
 
 void
@@ -1105,6 +1104,9 @@
        rxq = malloc(sizeof(struct sfxge_rxq), M_SFXGE, M_ZERO | M_WAITOK);
        rxq->sc = sc;
        rxq->index = index;
+       rxq->entries = sc->rxq_entries;
+       rxq->ptr_mask = rxq->entries - 1;
+       rxq->refill_threshold = RX_REFILL_THRESHOLD(rxq->entries);
 
        sc->rxq[index] = rxq;
        esmp = &rxq->mem;
@@ -1112,16 +1114,16 @@
        evq = sc->evq[index];
 
        /* Allocate and zero DMA space. */
-       if ((rc = sfxge_dma_alloc(sc, EFX_RXQ_SIZE(SFXGE_NDESCS), esmp)) != 0)
+       if ((rc = sfxge_dma_alloc(sc, EFX_RXQ_SIZE(sc->rxq_entries), esmp)) != 0)
                return (rc);
-       (void)memset(esmp->esm_base, 0, EFX_RXQ_SIZE(SFXGE_NDESCS));
+       (void)memset(esmp->esm_base, 0, EFX_RXQ_SIZE(sc->rxq_entries));
 
        /* Allocate buffer table entries. */
-       sfxge_sram_buf_tbl_alloc(sc, EFX_RXQ_NBUFS(SFXGE_NDESCS),
+       sfxge_sram_buf_tbl_alloc(sc, EFX_RXQ_NBUFS(sc->rxq_entries),
                                 &rxq->buf_base_id);
 
        /* Allocate the context array and the flow table. */
-       rxq->queue = malloc(sizeof(struct sfxge_rx_sw_desc) * SFXGE_NDESCS,
+       rxq->queue = malloc(sizeof(struct sfxge_rx_sw_desc) * sc->rxq_entries,
            M_SFXGE, M_WAITOK | M_ZERO);
        sfxge_lro_init(rxq);
 
diff -r abfcbc1ff6b9 sys/dev/sfxge/sfxge_rx.h
--- a/sys/dev/sfxge/sfxge_rx.h  Thu Sep 25 15:59:36 2014 +0400
+++ b/sys/dev/sfxge/sfxge_rx.h  Thu Sep 25 16:06:37 2014 +0400
@@ -159,6 +159,8 @@
        efsys_mem_t                     mem;
        unsigned int                    buf_base_id;
        enum sfxge_rxq_state            init_state;
+       unsigned int                    entries;
+       unsigned int                    ptr_mask;
 
        struct sfxge_rx_sw_desc         *queue __aligned(CACHE_LINE_SIZE);
        unsigned int                    added;
@@ -166,6 +168,7 @@
        unsigned int                    completed;
        unsigned int                    loopback;
        struct sfxge_lro_state          lro;
+       unsigned int                    refill_threshold;
        struct callout                  refill_callout;
        unsigned int                    refill_delay;
 
diff -r abfcbc1ff6b9 sys/dev/sfxge/sfxge_tx.c
--- a/sys/dev/sfxge/sfxge_tx.c  Thu Sep 25 15:59:36 2014 +0400
+++ b/sys/dev/sfxge/sfxge_tx.c  Thu Sep 25 16:06:37 2014 +0400
@@ -75,7 +75,7 @@
  * minimum MSS of 512.
  */
 #define        SFXGE_TSO_MAX_DESC ((65535 / 512) * 2 + SFXGE_TX_MAPPING_MAX_SEG - 1)
-#define        SFXGE_TXQ_BLOCK_LEVEL (SFXGE_NDESCS - SFXGE_TSO_MAX_DESC)
+#define        SFXGE_TXQ_BLOCK_LEVEL(_entries) ((_entries) - SFXGE_TSO_MAX_DESC)
 
 /* Forward declarations. */
 static inline void sfxge_tx_qdpl_service(struct sfxge_txq *txq);
@@ -101,7 +101,7 @@
                struct sfxge_tx_mapping *stmp;
                unsigned int id;
 
-               id = completed++ & (SFXGE_NDESCS - 1);
+               id = completed++ & txq->ptr_mask;
 
                stmp = &txq->stmp[id];
                if (stmp->flags & TX_BUF_UNMAP) {
@@ -125,7 +125,7 @@
                unsigned int level;
 
                level = txq->added - txq->completed;
-               if (level <= SFXGE_TXQ_UNBLOCK_LEVEL)
+               if (level <= SFXGE_TXQ_UNBLOCK_LEVEL(txq->entries))
                        sfxge_tx_qunblock(txq);
        }
 }
@@ -218,19 +218,19 @@
                ("efx_tx_qpost() refragmented descriptors"));
 
        level = txq->added - txq->reaped;
-       KASSERT(level <= SFXGE_NDESCS, ("overfilled TX queue"));
+       KASSERT(level <= txq->entries, ("overfilled TX queue"));
 
        /* Clear the fragment list. */
        txq->n_pend_desc = 0;
 
        /* Have we reached the block level? */
-       if (level < SFXGE_TXQ_BLOCK_LEVEL)
+       if (level < SFXGE_TXQ_BLOCK_LEVEL(txq->entries))
                return;
 
        /* Reap, and check again */
        sfxge_tx_qreap(txq);
        level = txq->added - txq->reaped;
-       if (level < SFXGE_TXQ_BLOCK_LEVEL)
+       if (level < SFXGE_TXQ_BLOCK_LEVEL(txq->entries))
                return;
 
        txq->blocked = 1;
@@ -242,7 +242,7 @@
        mb();
        sfxge_tx_qreap(txq);
        level = txq->added - txq->reaped;
-       if (level < SFXGE_TXQ_BLOCK_LEVEL) {
+       if (level < SFXGE_TXQ_BLOCK_LEVEL(txq->entries)) {
                mb();
                txq->blocked = 0;
        }
@@ -271,7 +271,7 @@
        }
 
        /* Load the packet for DMA. */
-       id = txq->added & (SFXGE_NDESCS - 1);
+       id = txq->added & txq->ptr_mask;
        stmp = &txq->stmp[id];
        rc = bus_dmamap_load_mbuf_sg(txq->packet_dma_tag, stmp->map,
                                     mbuf, dma_seg, &n_dma_seg, 0);
@@ -318,7 +318,7 @@
 
                        stmp->flags = 0;
                        if (__predict_false(stmp ==
-                                           &txq->stmp[SFXGE_NDESCS - 1]))
+                                           &txq->stmp[txq->ptr_mask]))
                                stmp = &txq->stmp[0];
                        else
                                stmp++;
@@ -762,20 +762,22 @@
  * a TSO header buffer, since they must always be followed by a
  * payload descriptor referring to an mbuf.
  */
-#define        TSOH_COUNT      (SFXGE_NDESCS / 2u)
+#define        TSOH_COUNT(_txq_entries)        ((_txq_entries) / 2u)
 #define        TSOH_PER_PAGE   (PAGE_SIZE / TSOH_STD_SIZE)
-#define        TSOH_PAGE_COUNT ((TSOH_COUNT + TSOH_PER_PAGE - 1) / TSOH_PER_PAGE)
+#define        TSOH_PAGE_COUNT(_txq_entries)   \
+       ((TSOH_COUNT(_txq_entries) + TSOH_PER_PAGE - 1) / TSOH_PER_PAGE)
 
 static int tso_init(struct sfxge_txq *txq)
 {
        struct sfxge_softc *sc = txq->sc;
+       unsigned int tsoh_page_count = TSOH_PAGE_COUNT(sc->txq_entries);
        int i, rc;
 
        /* Allocate TSO header buffers */
-       txq->tsoh_buffer = malloc(TSOH_PAGE_COUNT * sizeof(txq->tsoh_buffer[0]),
+       txq->tsoh_buffer = malloc(tsoh_page_count * sizeof(txq->tsoh_buffer[0]),
                                  M_SFXGE, M_WAITOK);
 
-       for (i = 0; i < TSOH_PAGE_COUNT; i++) {
+       for (i = 0; i < tsoh_page_count; i++) {
                rc = sfxge_dma_alloc(sc, PAGE_SIZE, &txq->tsoh_buffer[i]);
                if (rc != 0)
                        goto fail;
@@ -796,7 +798,7 @@
        int i;
 
        if (txq->tsoh_buffer != NULL) {
-               for (i = 0; i < TSOH_PAGE_COUNT; i++)
+               for (i = 0; i < TSOH_PAGE_COUNT(txq->sc->txq_entries); i++)
                        sfxge_dma_free(&txq->tsoh_buffer[i]);
                free(txq->tsoh_buffer, M_SFXGE);
        }
@@ -1010,12 +1012,12 @@
                tso.dma_addr = dma_seg->ds_addr + tso.header_len;
        }
 
-       id = txq->added & (SFXGE_NDESCS - 1);
+       id = txq->added & txq->ptr_mask;
        if (__predict_false(tso_start_new_packet(txq, &tso, id)))
-               return -1;
+               return (-1);
 
        while (1) {
-               id = (id + 1) & (SFXGE_NDESCS - 1);
+               id = (id + 1) & txq->ptr_mask;
                tso_fill_packet_with_fragment(txq, &tso);
 
                /* Move onto the next fragment? */
@@ -1038,7 +1040,7 @@
                        if (txq->n_pend_desc >
                            SFXGE_TSO_MAX_DESC - (1 + SFXGE_TX_MAPPING_MAX_SEG))
                                break;
-                       next_id = (id + 1) & (SFXGE_NDESCS - 1);
+                       next_id = (id + 1) & txq->ptr_mask;
                        if (__predict_false(tso_start_new_packet(txq, &tso,
                                                                 next_id)))
                                break;
@@ -1070,7 +1072,7 @@
                unsigned int level;
 
                level = txq->added - txq->completed;
-               if (level <= SFXGE_TXQ_UNBLOCK_LEVEL)
+               if (level <= SFXGE_TXQ_UNBLOCK_LEVEL(txq->entries))
                        txq->blocked = 0;
        }
 
@@ -1146,7 +1148,7 @@
        txq->common = NULL;
 
        efx_sram_buf_tbl_clear(sc->enp, txq->buf_base_id,
-           EFX_TXQ_NBUFS(SFXGE_NDESCS));
+           EFX_TXQ_NBUFS(sc->txq_entries));
 
        mtx_unlock(&evq->lock);
        mtx_unlock(SFXGE_TXQ_LOCK(txq));
@@ -1172,8 +1174,8 @@
 
        /* Program the buffer table. */
        if ((rc = efx_sram_buf_tbl_set(sc->enp, txq->buf_base_id, esmp,
-           EFX_TXQ_NBUFS(SFXGE_NDESCS))) != 0)
-               return rc;
+           EFX_TXQ_NBUFS(sc->txq_entries))) != 0)
+               return (rc);
 
        /* Determine the kind of queue we are creating. */
        switch (txq->type) {
@@ -1194,7 +1196,7 @@
 
        /* Create the common code transmit queue. */
        if ((rc = efx_tx_qcreate(sc->enp, index, txq->type, esmp,
-           SFXGE_NDESCS, txq->buf_base_id, flags, evq->common,
+           sc->txq_entries, txq->buf_base_id, flags, evq->common,
            &txq->common)) != 0)
                goto fail;
 
@@ -1211,8 +1213,8 @@
 
 fail:
        efx_sram_buf_tbl_clear(sc->enp, txq->buf_base_id,
-           EFX_TXQ_NBUFS(SFXGE_NDESCS));
-       return rc;
+           EFX_TXQ_NBUFS(sc->txq_entries));
+       return (rc);
 }
 
 void
@@ -1280,7 +1282,7 @@
 sfxge_tx_qfini(struct sfxge_softc *sc, unsigned int index)
 {
        struct sfxge_txq *txq;
-       unsigned int nmaps = SFXGE_NDESCS;
+       unsigned int nmaps;
 
        txq = sc->txq[index];
 
@@ -1292,6 +1294,7 @@
 
        /* Free the context arrays. */
        free(txq->pend_desc, M_SFXGE);
+       nmaps = sc->txq_entries;
        while (nmaps-- != 0)
                bus_dmamap_destroy(txq->packet_dma_tag, txq->stmp[nmaps].map);
        free(txq->stmp, M_SFXGE);
@@ -1323,6 +1326,8 @@
 
        txq = malloc(sizeof(struct sfxge_txq), M_SFXGE, M_ZERO | M_WAITOK);
        txq->sc = sc;
+       txq->entries = sc->txq_entries;
+       txq->ptr_mask = txq->entries - 1;
 
        sc->txq[txq_index] = txq;
        esmp = &txq->mem;
@@ -1330,12 +1335,12 @@
        evq = sc->evq[evq_index];
 
        /* Allocate and zero DMA space for the descriptor ring. */
-       if ((rc = sfxge_dma_alloc(sc, EFX_TXQ_SIZE(SFXGE_NDESCS), esmp)) != 0)
+       if ((rc = sfxge_dma_alloc(sc, EFX_TXQ_SIZE(sc->txq_entries), esmp)) != 0)
                return (rc);
-       (void)memset(esmp->esm_base, 0, EFX_TXQ_SIZE(SFXGE_NDESCS));
+       (void)memset(esmp->esm_base, 0, EFX_TXQ_SIZE(sc->txq_entries));
 
        /* Allocate buffer table entries. */
-       sfxge_sram_buf_tbl_alloc(sc, EFX_TXQ_NBUFS(SFXGE_NDESCS),
+       sfxge_sram_buf_tbl_alloc(sc, EFX_TXQ_NBUFS(sc->txq_entries),
                                 &txq->buf_base_id);
 
        /* Create a DMA tag for packet mappings. */
@@ -1349,13 +1354,13 @@
        }
 
        /* Allocate pending descriptor array for batching writes. */
-       txq->pend_desc = malloc(sizeof(efx_buffer_t) * SFXGE_NDESCS,
+       txq->pend_desc = malloc(sizeof(efx_buffer_t) * sc->txq_entries,
                                M_SFXGE, M_ZERO | M_WAITOK);
 
        /* Allocate and initialise mbuf DMA mapping array. */
-       txq->stmp = malloc(sizeof(struct sfxge_tx_mapping) * SFXGE_NDESCS,
+       txq->stmp = malloc(sizeof(struct sfxge_tx_mapping) * sc->txq_entries,
            M_SFXGE, M_ZERO | M_WAITOK);
-       for (nmaps = 0; nmaps < SFXGE_NDESCS; nmaps++) {
+       for (nmaps = 0; nmaps < sc->txq_entries; nmaps++) {
                rc = bus_dmamap_create(txq->packet_dma_tag, 0,
                                       &txq->stmp[nmaps].map);
                if (rc != 0)
diff -r abfcbc1ff6b9 sys/dev/sfxge/sfxge_tx.h
--- a/sys/dev/sfxge/sfxge_tx.h  Thu Sep 25 15:59:36 2014 +0400
+++ b/sys/dev/sfxge/sfxge_tx.h  Thu Sep 25 16:06:37 2014 +0400
@@ -106,7 +106,7 @@
        SFXGE_TXQ_NTYPES
 };
 
-#define        SFXGE_TXQ_UNBLOCK_LEVEL         (EFX_TXQ_LIMIT(SFXGE_NDESCS) / 4)
+#define        SFXGE_TXQ_UNBLOCK_LEVEL(_entries)       (EFX_TXQ_LIMIT(_entries) / 4)
 
 #define        SFXGE_TX_BATCH  64
 
@@ -128,6 +128,8 @@
        unsigned int                    evq_index;
        efsys_mem_t                     mem;
        unsigned int                    buf_base_id;
+       unsigned int                    entries;
+       unsigned int                    ptr_mask;
 
        struct sfxge_tx_mapping         *stmp;  /* Packets in flight. */
        bus_dma_tag_t                   packet_dma_tag;
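
For reference, here is a small standalone sketch (ours, not part of the
patch) showing how the event queue sizing in the sfxge_ev.c hunk works out
for the default 1024-entry rings. It reuses the patch's ROUNDUP_POW_OF_TWO
definition; flsl() is available to FreeBSD userland programs via
<strings.h>:

    /*
     * Event queue 0 serves three Tx queues plus one Rx queue; the other
     * event queues serve one of each. The 128 entries of slack cover
     * link state events and MCDI completions, as in the patch.
     */
    #include <stdio.h>
    #include <strings.h>

    #define ROUNDUP_POW_OF_TWO(_n)  (1ULL << flsl((_n) - 1))

    int
    main(void)
    {
            unsigned int rxq_entries = 1024;    /* default SFXGE_NDESCS */
            unsigned int txq_entries = 1024;

            /* Prints 8192: 1024 + 3 * 1024 + 128, rounded up. */
            printf("evq0: %llu\n",
                ROUNDUP_POW_OF_TWO(rxq_entries + 3 * txq_entries + 128));
            /* Prints 4096, the old fixed SFXGE_NEVS value. */
            printf("evqN: %llu\n",
                ROUNDUP_POW_OF_TWO(rxq_entries + txq_entries + 128));
            return (0);
    }

So larger rings grow the event queues automatically instead of overflowing
a fixed SFXGE_NEVS.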