On Tue, Apr 19, 2011 at 8:19 PM, Freddie Cash <fjwc...@gmail.com> wrote:
> On Tue, Apr 19, 2011 at 7:42 AM, K. Macy <km...@freebsd.org> wrote:
>>> I'm not able to find IFNET_MULTIQUEUE in a recent 8.2-STABLE, is this
>>> something present only in HEAD?
>>
>> It looks like it is now EM_MULTIQUEUE.
>
> Just curious, how would one enable this to test it?  We have igb(4)
> interfaces in our new storage boxes, and it would be interesting to
> test whether or not it helps in our setup.
>

It should automatically allocate a queue per core, up to the maximum
the hardware supports. Post-8.0 it should be enabled by default for
igb:


#if __FreeBSD_version >= 800000
/*
** Multiqueue Transmit driver
*/
static int
igb_mq_start(struct ifnet *ifp, struct mbuf *m)
{
        struct adapter          *adapter = ifp->if_softc;
        struct igb_queue        *que;
        struct tx_ring          *txr;
        int                     i = 0, err = 0;

        /* Which queue to use */
        if ((m->m_flags & M_FLOWID) != 0)
                i = m->m_pkthdr.flowid % adapter->num_queues;

        txr = &adapter->tx_rings[i];
        que = &adapter->queues[i];

        if (IGB_TX_TRYLOCK(txr)) {
                err = igb_mq_start_locked(ifp, txr, m);
                IGB_TX_UNLOCK(txr);
        } else {
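                /* Lock contended: queue the packet, let the taskqueue send it */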
                err = drbr_enqueue(ifp, txr->br, m);
                taskqueue_enqueue(que->tq, &que->que_task);
        }

        return (err);
}

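/*
 * Drain the buf_ring with the TX lock held: re-queue and bail if the
 * interface is down, reclaim descriptors when they run low, and back
 * off with IFF_DRV_OACTIVE when the ring is nearly full.
 */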
static int
igb_mq_start_locked(struct ifnet *ifp, struct tx_ring *txr, struct mbuf *m)
{
        struct adapter  *adapter = txr->adapter;
        struct mbuf     *next;
        int             err = 0, enq;

        IGB_TX_LOCK_ASSERT(txr);

        if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
            IFF_DRV_RUNNING || adapter->link_active == 0) {
                if (m != NULL)
                        err = drbr_enqueue(ifp, txr->br, m);
                return (err);
        }

        /* Call cleanup if number of TX descriptors low */
        if (txr->tx_avail <= IGB_TX_CLEANUP_THRESHOLD)
                igb_txeof(txr);

        enq = 0;
        if (m == NULL) {
                next = drbr_dequeue(ifp, txr->br);
        } else if (drbr_needs_enqueue(ifp, txr->br)) {
                if ((err = drbr_enqueue(ifp, txr->br, m)) != 0)
                        return (err);
                next = drbr_dequeue(ifp, txr->br);
        } else
                next = m;

        /* Process the queue */
        while (next != NULL) {
                if ((err = igb_xmit(txr, &next)) != 0) {
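                        /* Send failed: put the packet back on the buf_ring */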
                        if (next != NULL)
                                err = drbr_enqueue(ifp, txr->br, next);
                        break;
                }
                enq++;
                drbr_stats_update(ifp, next->m_pkthdr.len, next->m_flags);
                ETHER_BPF_MTAP(ifp, next);
                if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
                        break;
                if (txr->tx_avail <= IGB_TX_OP_THRESHOLD) {
                        ifp->if_drv_flags |= IFF_DRV_OACTIVE;
                        break;
                }
                next = drbr_dequeue(ifp, txr->br);
        }
        if (enq > 0) {
                /* Set the watchdog */
                txr->queue_status = IGB_QUEUE_WORKING;
                txr->watchdog_time = ticks;
        }
        return (err);
}
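
By the way, the heart of this is the try-lock-or-defer dispatch in
igb_mq_start(): the flowid hash picks a ring, and a contended lock
just means the packet is queued and the taskqueue finishes the job.
Here is a minimal userland sketch of that pattern, in case it helps
when reading the code. Everything in it (mq_start, ring, NQUEUES,
NPKTS) is made up for illustration; it is not driver code:

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define NQUEUES 4
#define NPKTS   100000

struct ring {
        pthread_mutex_t lock;
        unsigned long   direct;         /* sent inline, lock acquired */
        unsigned long   deferred;       /* lock busy, handed off */
};

static struct ring rings[NQUEUES];

static void
mq_start(uint32_t flowid)
{
        /* Hash the flow onto a ring, as igb_mq_start() does. */
        struct ring *r = &rings[flowid % NQUEUES];

        if (pthread_mutex_trylock(&r->lock) == 0) {
                r->direct++;            /* igb_mq_start_locked() stand-in */
                pthread_mutex_unlock(&r->lock);
        } else {
                /* drbr_enqueue() + taskqueue_enqueue() stand-in */
                __sync_fetch_and_add(&r->deferred, 1);
        }
}

static void *
sender(void *arg)
{
        uint32_t base = (uint32_t)(uintptr_t)arg;
        uint32_t i;

        for (i = 0; i < NPKTS; i++)
                mq_start(base + i);
        return (NULL);
}

int
main(void)
{
        pthread_t tids[2];
        int i;

        for (i = 0; i < NQUEUES; i++)
                pthread_mutex_init(&rings[i].lock, NULL);

        /* Two senders whose flow ids land on the same rings. */
        for (i = 0; i < 2; i++)
                pthread_create(&tids[i], NULL, sender,
                    (void *)(uintptr_t)i);
        for (i = 0; i < 2; i++)
                pthread_join(tids[i], NULL);

        for (i = 0; i < NQUEUES; i++)
                printf("ring %d: direct %lu deferred %lu\n", i,
                    rings[i].direct, rings[i].deferred);
        return (0);
}

With two senders hammering the same four rings you should see nonzero
deferred counts, which is exactly the case the taskqueue handles in
the driver.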



I haven't tested this enough to rule out hidden lock-contention
performance issues.
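
If you want to poke at it: I believe the queue count can be capped
with the hw.igb.num_queues loader tunable (0 should mean one queue
per core). A handful of parallel netperf or iperf streams from
different source ports should spread flowids across the rings, and
vmstat -i should then show the per-queue igb interrupt vectors all
firing.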

Cheers