The branch stable/14 has been updated by zlei:

URL: https://cgit.FreeBSD.org/src/commit/?id=0fa85665105baa1fafe22608304b4eddb2ad8949

commit 0fa85665105baa1fafe22608304b4eddb2ad8949
Author:     Zhenlei Huang <z...@freebsd.org>
AuthorDate: 2024-09-03 10:25:30 +0000
Commit:     Zhenlei Huang <z...@freebsd.org>
CommitDate: 2024-09-30 04:44:24 +0000

    mana: Stop checking for failures from malloc/mallocarray/buf_ring_alloc(M_WAITOK)
    
    MFC after:      1 week
    Differential Revision:  https://reviews.freebsd.org/D45852
    
    (cherry picked from commit 1dc7a7b74b5ad37ff7c8dc22f1a710460a5f1dcd)
---
 sys/dev/mana/gdma_main.c  | 19 -------------------
 sys/dev/mana/hw_channel.c | 17 -----------------
 sys/dev/mana/mana_en.c    | 41 -----------------------------------------
 3 files changed, 77 deletions(-)

diff --git a/sys/dev/mana/gdma_main.c b/sys/dev/mana/gdma_main.c
index d0438f127b83..090f6382b071 100644
--- a/sys/dev/mana/gdma_main.c
+++ b/sys/dev/mana/gdma_main.c
@@ -868,9 +868,6 @@ int mana_gd_create_hwc_queue(struct gdma_dev *gd,
        int err;
 
        queue = malloc(sizeof(*queue), M_DEVBUF, M_WAITOK | M_ZERO);
-       if (!queue)
-               return ENOMEM;
-
        gmi = &queue->mem_info;
        err = mana_gd_alloc_memory(gc, spec->queue_size, gmi);
        if (err)
@@ -962,9 +959,6 @@ mana_gd_create_dma_region(struct gdma_dev *gd,
        }
 
        req = malloc(req_msg_size, M_DEVBUF, M_WAITOK | M_ZERO);
-       if (!req)
-               return ENOMEM;
-
        mana_gd_init_req_hdr(&req->hdr, GDMA_CREATE_DMA_REGION,
            req_msg_size, sizeof(resp));
        req->length = length;
@@ -1008,9 +1002,6 @@ mana_gd_create_mana_eq(struct gdma_dev *gd,
                return EINVAL;
 
        queue = malloc(sizeof(*queue),  M_DEVBUF, M_WAITOK | M_ZERO);
-       if (!queue)
-               return ENOMEM;
-
        gmi = &queue->mem_info;
        err = mana_gd_alloc_memory(gc, spec->queue_size, gmi);
        if (err)
@@ -1056,9 +1047,6 @@ int mana_gd_create_mana_wq_cq(struct gdma_dev *gd,
                return EINVAL;
 
        queue = malloc(sizeof(*queue), M_DEVBUF, M_WAITOK | M_ZERO);
-       if (!queue)
-               return ENOMEM;
-
        gmi = &queue->mem_info;
        err = mana_gd_alloc_memory(gc, spec->queue_size, gmi);
        if (err)
@@ -1480,9 +1468,6 @@ mana_gd_alloc_res_map(uint32_t res_avail,
 
        r->map =
            malloc(n * sizeof(unsigned long), M_DEVBUF, M_WAITOK | M_ZERO);
-       if (!r->map)
-               return ENOMEM;
-
        r->size = res_avail;
        mtx_init(&r->lock_spin, lock_name, NULL, MTX_SPIN);
 
@@ -1616,10 +1601,6 @@ mana_gd_setup_irqs(device_t dev)
 
        gc->irq_contexts = malloc(nvec * sizeof(struct gdma_irq_context),
            M_DEVBUF, M_WAITOK | M_ZERO);
-       if (!gc->irq_contexts) {
-               rc = ENOMEM;
-               goto err_setup_irq_release;
-       }
 
        for (i = 0; i < nvec; i++) {
                gic = &gc->irq_contexts[i];
diff --git a/sys/dev/mana/hw_channel.c b/sys/dev/mana/hw_channel.c
index 1ecd276c0152..1d6a896d79dc 100644
--- a/sys/dev/mana/hw_channel.c
+++ b/sys/dev/mana/hw_channel.c
@@ -416,8 +416,6 @@ mana_hwc_create_cq(struct hw_channel_context *hwc,
                cq_size = MINIMUM_SUPPORTED_PAGE_SIZE;
 
        hwc_cq = malloc(sizeof(*hwc_cq), M_DEVBUF, M_WAITOK | M_ZERO);
-       if (!hwc_cq)
-               return ENOMEM;
 
        err = mana_hwc_create_gdma_eq(hwc, eq_size, ctx, callback, &eq);
        if (err) {
@@ -438,10 +436,6 @@ mana_hwc_create_cq(struct hw_channel_context *hwc,
 
        comp_buf = mallocarray(q_depth, sizeof(struct gdma_comp),
            M_DEVBUF, M_WAITOK | M_ZERO);
-       if (!comp_buf) {
-               err = ENOMEM;
-               goto out;
-       }
 
        hwc_cq->hwc = hwc;
        hwc_cq->comp_buf = comp_buf;
@@ -476,8 +470,6 @@ mana_hwc_alloc_dma_buf(struct hw_channel_context *hwc, uint16_t q_depth,
        dma_buf = malloc(sizeof(*dma_buf) +
            q_depth * sizeof(struct hwc_work_request),
            M_DEVBUF, M_WAITOK | M_ZERO);
-       if (!dma_buf)
-               return ENOMEM;
 
        dma_buf->num_reqs = q_depth;
 
@@ -560,8 +552,6 @@ mana_hwc_create_wq(struct hw_channel_context *hwc,
                queue_size = MINIMUM_SUPPORTED_PAGE_SIZE;
 
        hwc_wq = malloc(sizeof(*hwc_wq), M_DEVBUF, M_WAITOK | M_ZERO);
-       if (!hwc_wq)
-               return ENOMEM;
 
        err = mana_hwc_create_gdma_wq(hwc, q_type, queue_size, &queue);
        if (err)
@@ -669,8 +659,6 @@ mana_hwc_test_channel(struct hw_channel_context *hwc, uint16_t q_depth,
 
        ctx = malloc(q_depth * sizeof(struct hwc_caller_ctx),
            M_DEVBUF, M_WAITOK | M_ZERO);
-       if (!ctx)
-               return ENOMEM;
 
        for (i = 0; i < q_depth; ++i)
                init_completion(&ctx[i].comp_event);
@@ -719,9 +707,6 @@ mana_hwc_establish_channel(struct gdma_context *gc, uint16_t *q_depth,
 
        gc->cq_table = malloc(gc->max_num_cqs * sizeof(struct gdma_queue *),
            M_DEVBUF, M_WAITOK | M_ZERO);
-       if (!gc->cq_table)
-               return ENOMEM;
-
        gc->cq_table[cq->id] = cq;
 
        return 0;
@@ -782,8 +767,6 @@ mana_hwc_create_channel(struct gdma_context *gc)
        int err;
 
        hwc = malloc(sizeof(*hwc), M_DEVBUF, M_WAITOK | M_ZERO);
-       if (!hwc)
-               return ENOMEM;
 
        gd->gdma_context = gc;
        gd->driver_data = hwc;
diff --git a/sys/dev/mana/mana_en.c b/sys/dev/mana/mana_en.c
index 50a6192e5fbc..d5efb071a07e 100644
--- a/sys/dev/mana/mana_en.c
+++ b/sys/dev/mana/mana_en.c
@@ -921,13 +921,6 @@ mana_init_port_context(struct mana_port_context *apc)
        apc->rxqs = mallocarray(apc->num_queues, sizeof(struct mana_rxq *),
            M_DEVBUF, M_WAITOK | M_ZERO);
 
-       if (!apc->rxqs) {
-               bus_dma_tag_destroy(apc->tx_buf_tag);
-               bus_dma_tag_destroy(apc->rx_buf_tag);
-               apc->rx_buf_tag = NULL;
-               return ENOMEM;
-       }
-
        return 0;
 }
 
@@ -1156,8 +1149,6 @@ mana_cfg_vport_steering(struct mana_port_context *apc,
 
        req_buf_size = sizeof(*req) + sizeof(mana_handle_t) * num_entries;
        req = malloc(req_buf_size, M_DEVBUF, M_WAITOK | M_ZERO);
-       if (!req)
-               return ENOMEM;
 
        mana_gd_init_req_hdr(&req->hdr, MANA_CONFIG_VPORT_RX, req_buf_size,
            sizeof(resp));
@@ -1325,8 +1316,6 @@ mana_create_eq(struct mana_context *ac)
 
        ac->eqs = mallocarray(gc->max_num_queues, sizeof(struct mana_eq),
            M_DEVBUF, M_WAITOK | M_ZERO);
-       if (!ac->eqs)
-               return ENOMEM;
 
        spec.type = GDMA_EQ;
        spec.monitor_avl_buf = false;
@@ -2043,8 +2032,6 @@ mana_create_txq(struct mana_port_context *apc, if_t net)
 
        apc->tx_qp = mallocarray(apc->num_queues, sizeof(struct mana_tx_qp),
            M_DEVBUF, M_WAITOK | M_ZERO);
-       if (!apc->tx_qp)
-               return ENOMEM;
 
        /*  The minimum size of the WQE is 32 bytes, hence
         *  MAX_SEND_BUFFERS_PER_QUEUE represents the maximum number of WQEs
@@ -2141,14 +2128,6 @@ mana_create_txq(struct mana_port_context *apc, if_t net)
                txq->tx_buf_info = malloc(MAX_SEND_BUFFERS_PER_QUEUE *
                    sizeof(struct mana_send_buf_info),
                    M_DEVBUF, M_WAITOK | M_ZERO);
-               if (unlikely(txq->tx_buf_info == NULL)) {
-                       if_printf(net,
-                           "Failed to allocate tx buf info for SQ %u\n",
-                           txq->gdma_sq->id);
-                       err = ENOMEM;
-                       goto out;
-               }
-
 
                snprintf(txq->txq_mtx_name, nitems(txq->txq_mtx_name),
                    "mana:tx(%d)", i);
@@ -2156,13 +2135,6 @@ mana_create_txq(struct mana_port_context *apc, if_t net)
 
                txq->txq_br = buf_ring_alloc(4 * MAX_SEND_BUFFERS_PER_QUEUE,
                    M_DEVBUF, M_WAITOK, &txq->txq_mtx);
-               if (unlikely(txq->txq_br == NULL)) {
-                       if_printf(net,
-                           "Failed to allocate buf ring for SQ %u\n",
-                           txq->gdma_sq->id);
-                       err = ENOMEM;
-                       goto out;
-               }
 
                /* Allocate taskqueue for deferred send */
                TASK_INIT(&txq->enqueue_task, 0, mana_xmit_taskfunc, txq);
@@ -2353,9 +2325,6 @@ mana_create_rxq(struct mana_port_context *apc, uint32_t rxq_idx,
        rxq = malloc(sizeof(*rxq) +
            RX_BUFFERS_PER_QUEUE * sizeof(struct mana_recv_buf_oob),
            M_DEVBUF, M_WAITOK | M_ZERO);
-       if (!rxq)
-               return NULL;
-
        rxq->ndev = ndev;
        rxq->num_rx_buf = RX_BUFFERS_PER_QUEUE;
        rxq->rxq_idx = rxq_idx;
@@ -2808,12 +2777,6 @@ mana_probe_port(struct mana_context *ac, int port_idx,
        *ndev_storage = ndev;
 
        apc = malloc(sizeof(*apc), M_DEVBUF, M_WAITOK | M_ZERO);
-       if (!apc) {
-               mana_err(NULL, "Failed to allocate port context\n");
-               err = ENOMEM;
-               goto free_net;
-       }
-
        apc->ac = ac;
        apc->ndev = ndev;
        apc->max_queues = gc->max_num_queues;
@@ -2892,7 +2855,6 @@ mana_probe_port(struct mana_context *ac, int port_idx,
 
 reset_apc:
        free(apc, M_DEVBUF);
-free_net:
        *ndev_storage = NULL;
        if_printf(ndev, "Failed to probe vPort %d: %d\n", port_idx, err);
        if_free(ndev);
@@ -2915,9 +2877,6 @@ int mana_probe(struct gdma_dev *gd)
                return err;
 
        ac = malloc(sizeof(*ac), M_DEVBUF, M_WAITOK | M_ZERO);
-       if (!ac)
-               return ENOMEM;
-
        ac->gdma_dev = gd;
        ac->num_ports = 1;
        gd->driver_data = ac;

Reply via email to