Hi,

>  uint16_t enic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
>  	uint16_t nb_pkts)
>  {
>  	uint16_t index;
> -	unsigned int frags;
> -	unsigned int pkt_len;
> -	unsigned int seg_len;
> -	unsigned int inc_len;
> +	unsigned int pkt_len, data_len;
>  	unsigned int nb_segs;
> -	struct rte_mbuf *tx_pkt, *next_tx_pkt;
> +	struct rte_mbuf *tx_pkt;
>  	struct vnic_wq *wq = (struct vnic_wq *)tx_queue;
>  	struct enic *enic = vnic_dev_priv(wq->vdev);
>  	unsigned short vlan_id;
>  	unsigned short ol_flags;
Above, ol_flags should be uint64_t.

> -	uint8_t last_seg, eop;
> -	unsigned int host_tx_descs = 0;
> +	unsigned int wq_desc_avail;
> +	int head_idx;
> +	struct vnic_wq_buf *buf;
> +	unsigned int hw_ip_cksum_enabled;
> +	unsigned int desc_count;
> +	struct wq_enet_desc *descs, *desc_p, desc_tmp;
> +	uint16_t mss;
> +	uint8_t vlan_tag_insert;
> +	uint8_t eop;
> +	uint64_t bus_addr;
> 
> +	enic_cleanup_wq(enic, wq);
> +	wq_desc_avail = vnic_wq_desc_avail(wq);
> +	head_idx = wq->head_idx;
> +	desc_count = wq->ring.desc_count;
> +
> +	nb_pkts = RTE_MIN(nb_pkts, ENIC_TX_XMIT_MAX);
> +
> +	hw_ip_cksum_enabled = enic->hw_ip_checksum;
>  	for (index = 0; index < nb_pkts; index++) {
>  		tx_pkt = *tx_pkts++;
> -		inc_len = 0;
>  		nb_segs = tx_pkt->nb_segs;
> -		if (nb_segs > vnic_wq_desc_avail(wq)) {
> +		if (nb_segs > wq_desc_avail) {
>  			if (index > 0)
> -				enic_post_wq_index(wq);
> -
> -			/* wq cleanup and try again */
> -			if (!enic_cleanup_wq(enic, wq) ||
> -				(nb_segs > vnic_wq_desc_avail(wq))) {
> -				return index;
> -			}
> +				goto post;
> +			goto done;
>  		}
> 
>  		pkt_len = tx_pkt->pkt_len;
> +		data_len = tx_pkt->data_len;
>  		vlan_id = tx_pkt->vlan_tci;
>  		ol_flags = tx_pkt->ol_flags;

Because you may lose a lot of flags here; see the sketch at the end of
this mail.

Piotr

> -		for (frags = 0; inc_len < pkt_len; frags++) {
> -			if (!tx_pkt)
> -				break;
> -			next_tx_pkt = tx_pkt->next;
> -			seg_len = tx_pkt->data_len;
> -			inc_len += seg_len;
> -
> -			host_tx_descs++;
> -			last_seg = 0;
> -			eop = 0;
> -			if ((pkt_len == inc_len) || !next_tx_pkt) {
> -				eop = 1;
> -				/* post if last packet in batch or thresh */
> -				if ((index == (nb_pkts - 1)) ||
> -					(host_tx_descs > ENIC_TX_POST_THRESH)) {
> -					last_seg = 1;
> -					host_tx_descs = 0;
> -				}
> +
> +		mss = 0;
> +		vlan_tag_insert = 0;
> +		bus_addr = (dma_addr_t)
> +			   (tx_pkt->buf_physaddr + tx_pkt->data_off);
> +
> +		descs = (struct wq_enet_desc *)wq->ring.descs;
> +		desc_p = descs + head_idx;
> +
> +		eop = (data_len == pkt_len);
> +
> +		if (ol_flags & PKT_TX_VLAN_PKT)
> +			vlan_tag_insert = 1;
> +
> +		if (hw_ip_cksum_enabled && (ol_flags & PKT_TX_IP_CKSUM))
> +			mss |= ENIC_CALC_IP_CKSUM;
> +
> +		if (hw_ip_cksum_enabled && (ol_flags & PKT_TX_TCP_UDP_CKSUM))
> +			mss |= ENIC_CALC_TCP_UDP_CKSUM;
> +
> +		wq_enet_desc_enc(&desc_tmp, bus_addr, data_len, mss, 0, 0, eop,
> +				 eop, 0, vlan_tag_insert, vlan_id, 0);
> +
> +		*desc_p = desc_tmp;
> +		buf = &wq->bufs[head_idx];
> +		buf->mb = (void *)tx_pkt;
> +		head_idx = enic_ring_incr(desc_count, head_idx);
> +		wq_desc_avail--;
> +
> +		if (!eop) {
> +			for (tx_pkt = tx_pkt->next; tx_pkt; tx_pkt =
> +			    tx_pkt->next) {
> +				data_len = tx_pkt->data_len;
> +
> +				if (tx_pkt->next == NULL)
> +					eop = 1;
> +				desc_p = descs + head_idx;
> +				bus_addr = (dma_addr_t)(tx_pkt->buf_physaddr
> +					   + tx_pkt->data_off);
> +				wq_enet_desc_enc((struct wq_enet_desc *)
> +						 &desc_tmp, bus_addr, data_len,
> +						 mss, 0, 0, eop, eop, 0,
> +						 vlan_tag_insert, vlan_id, 0);
> +
> +				*desc_p = desc_tmp;
> +				buf = &wq->bufs[head_idx];
> +				buf->mb = (void *)tx_pkt;
> +				head_idx = enic_ring_incr(desc_count,
> +							  head_idx);
> +				wq_desc_avail--;
>  			}
> -			enic_send_pkt(enic, wq, tx_pkt, (unsigned short)seg_len,
> -				!frags, eop, last_seg, ol_flags, vlan_id);
> -			tx_pkt = next_tx_pkt;
>  		}
>  	}
> + post:
> +	rte_wmb();
> +	iowrite32(head_idx, &wq->ctrl->posted_index);
> + done:
> +	wq->ring.desc_avail = wq_desc_avail;
> +	wq->head_idx = head_idx;
> 
> -	enic_cleanup_wq(enic, wq);
>  	return index;
>  }
> +
> +
> --
> 2.7.0
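To make the point concrete: mbuf ol_flags is a 64-bit field, and the TX
offload flags tested above live in its upper bits (PKT_TX_VLAN_PKT is
1ULL << 57 in the rte_mbuf.h of this era), so copying it into an
unsigned short zeroes all of them and the vlan_tag_insert/checksum
branches can never fire. A minimal standalone sketch of the truncation
(no DPDK headers needed; the macro value is copied in by hand and is
only illustrative):

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the macro in rte_mbuf.h; the flag sits far above bit 15. */
#define PKT_TX_VLAN_PKT (1ULL << 57)

int main(void)
{
	uint64_t ol_flags = PKT_TX_VLAN_PKT;	/* app asks for VLAN insert */
	unsigned short truncated = (unsigned short)ol_flags; /* as in the patch */

	/* The 16-bit copy drops every flag above bit 15. */
	printf("full: %d truncated: %d\n",
	       !!(ol_flags & PKT_TX_VLAN_PKT),	/* prints 1 */
	       !!(truncated & PKT_TX_VLAN_PKT));	/* prints 0 */
	return 0;
}

Declaring the local as uint64_t, matching struct rte_mbuf, avoids this.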