+static int ionic_tx_tso(struct ionic_queue *q, struct sk_buff *skb)
+{
+ struct ionic_tx_stats *stats = q_to_tx_stats(q);
+ struct ionic_desc_info *abort = q->head;
+ struct device *dev = q->lif->ionic->dev;
+ struct ionic_desc_info *rewind = abort;
+ struct ionic_txq_sg_elem *elem;
+ struct ionic_txq_desc *desc;
+ unsigned int frag_left = 0;
+ unsigned int offset = 0;
+ unsigned int len_left;
+ dma_addr_t desc_addr;
+ dma_addr_t frag_addr;
+ unsigned int hdrlen;
+ unsigned int nfrags;
+ unsigned int seglen;
+ u64 total_pkts = 0;
+ unsigned int left;
+ unsigned int len;
+ unsigned int mss;
+ skb_frag_t *frag;
+ bool start, done;
+ bool outer_csum;
+ bool has_vlan;
+ u16 desc_len;
+ u8 desc_nsge;
+ u16 vlan_tci;
+ bool encap;
+ int err;
+
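+ /* Gather the GSO parameters and the offload flags that
+ * get baked into each segment's descriptor.
+ */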
+ mss = skb_shinfo(skb)->gso_size;
+ nfrags = skb_shinfo(skb)->nr_frags;
+ len_left = skb->len - skb_headlen(skb);
+ outer_csum = !!(skb_shinfo(skb)->gso_type & (SKB_GSO_GRE_CSUM |
+ SKB_GSO_UDP_TUNNEL_CSUM));
+ has_vlan = !!skb_vlan_tag_present(skb);
+ vlan_tci = skb_vlan_tag_get(skb);
+ encap = skb->encapsulation;
+
+ /* Preload the inner-most TCP csum field with the IP pseudo
+ * header checksum, computed with the IP length set to zero.
+ * The HW will later add in the length of each TCP segment
+ * resulting from the TSO.
+ */
+
+ if (encap)
+ err = ionic_tx_tcp_inner_pseudo_csum(skb);
+ else
+ err = ionic_tx_tcp_pseudo_csum(skb);
+ if (err)
+ return err;
+
+ if (encap)
+ hdrlen = skb_inner_transport_header(skb) - skb->data +
+ inner_tcp_hdrlen(skb);
+ else
+ hdrlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
+
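+ /* The first segment carries the headers plus the first mss
+ * of payload; each later segment carries up to mss bytes.
+ */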
+ seglen = hdrlen + mss;
+ left = skb_headlen(skb);
+
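+ /* Get the first descriptor and its SG element list */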
+ desc = ionic_tx_tso_next(q, &elem);
+ start = true;
+
+ /* Chop skb->data up into desc segments */
+
+ while (left > 0) {
+ len = min(seglen, left);
+ frag_left = seglen - len;
+ desc_addr = ionic_tx_map_single(q, skb->data + offset, len);
+ if (dma_mapping_error(dev, desc_addr))
+ goto err_out_abort;
+ desc_len = len;
+ desc_nsge = 0;
+ left -= len;
+ offset += len;
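+ /* If this segment still needs data and frags remain, the
+ * rest of the segment is filled in below from the frags as
+ * SG elements on this same descriptor.
+ */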
+ if (nfrags > 0 && frag_left > 0)
+ continue;
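+ /* done: this descriptor carries the last bytes of the skb */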
+ done = (nfrags == 0 && left == 0);
+ ionic_tx_tso_post(q, desc, skb,
+ desc_addr, desc_nsge, desc_len,
+ hdrlen, mss,
+ outer_csum,
+ vlan_tci, has_vlan,
+ start, done);
+ total_pkts++;
+ desc = ionic_tx_tso_next(q, &elem);
+ start = false;
+ seglen = mss;
+ }
+
+ /* Chop skb frags into desc segments */
+
+ for (frag = skb_shinfo(skb)->frags; len_left; frag++) {
+ offset = 0;
+ left = skb_frag_size(frag);
+ len_left -= left;
+ nfrags--;
+ stats->frags++;
+
+ while (left > 0) {
+ if (frag_left > 0) {
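+ /* A segment is already in progress: map this piece
+ * of the frag as another SG element on it.
+ */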
+ len = min(frag_left, left);
+ frag_left -= len;
+ /* Check the mapping before the endian conversion */
+ frag_addr = ionic_tx_map_frag(q, frag,
+ offset, len);
+ if (dma_mapping_error(dev, frag_addr))
+ goto err_out_abort;
+ elem->addr = cpu_to_le64(frag_addr);
+ elem->len = cpu_to_le16(len);
+ elem++;
+ desc_nsge++;
+ left -= len;
+ offset += len;
+ if (nfrags > 0 && frag_left > 0)
+ continue;
+ done = (nfrags == 0 && left == 0);
+ ionic_tx_tso_post(q, desc, skb, desc_addr,
+ desc_nsge, desc_len,
+ hdrlen, mss, outer_csum,
+ vlan_tci, has_vlan,
+ start, done);
+ total_pkts++;
+ desc = ionic_tx_tso_next(q, &elem);
+ start = false;
+ } else {
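+ /* Start a new segment with this piece of the
+ * frag as the descriptor's main buffer.
+ */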
+ len = min(mss, left);
+ frag_left = mss - len;
+ desc_addr = ionic_tx_map_frag(q, frag,
+ offset, len);
+ if (dma_mapping_error(dev, desc_addr))
+ goto err_out_abort;
+ desc_len = len;
+ desc_nsge = 0;
+ left -= len;
+ offset += len;
+ if (nfrags > 0 && frag_left > 0)
+ continue;
+ done = (nfrags == 0 && left == 0);
+ ionic_tx_tso_post(q, desc, skb, desc_addr,
+ desc_nsge, desc_len,
+ hdrlen, mss, outer_csum,
+ vlan_tci, has_vlan,
+ start, done);
+ total_pkts++;
+ desc = ionic_tx_tso_next(q, &elem);
+ start = false;
+ }
+ }
+ }
+
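+ /* The HW replicates the headers into each segment, so the
+ * wire total is skb->len plus one extra copy of the headers
+ * for every segment after the first.
+ */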
+ stats->pkts += total_pkts;
+ stats->bytes += skb->len + (total_pkts - 1) * hdrlen;
+ stats->tso++;
+
+ return 0;
+
+err_out_abort:
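+ /* Unwind the descriptors used so far, then put the
+ * queue head back where this skb started.
+ */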
+ while (rewind->desc != q->head->desc) {
+ ionic_tx_clean(q, rewind, NULL, NULL);
+ rewind = rewind->next;
+ }
+ q->head = abort;
+
+ return -ENOMEM;
+}