Won't actually be exercised until we start advertising the corresponding
 offload features.

Signed-off-by: Edward Cree <ecree@solarflare.com>
---
 drivers/net/ethernet/sfc/ptp.c |  3 ++-
 drivers/net/ethernet/sfc/tx.c  |  2 +-
 drivers/net/ethernet/sfc/tx.h  | 26 ++++++++++++++++++++++++++
 3 files changed, 29 insertions(+), 2 deletions(-)

diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
index bd99517f06db..2e8c4569f03b 100644
--- a/drivers/net/ethernet/sfc/ptp.c
+++ b/drivers/net/ethernet/sfc/ptp.c
@@ -43,6 +43,7 @@
 #include "mcdi_pcol.h"
 #include "io.h"
 #include "farch_regs.h"
+#include "tx.h"
 #include "nic.h" /* indirectly includes ptp.h */
 
 /* Maximum number of events expected to make up a PTP event */
@@ -1081,8 +1082,8 @@ static int efx_ptp_synchronize(struct efx_nic *efx, unsigned int num_readings)
 /* Transmit a PTP packet via the dedicated hardware timestamped queue. */
 static void efx_ptp_xmit_skb_queue(struct efx_nic *efx, struct sk_buff *skb)
 {
-       u8 type = skb->ip_summed == CHECKSUM_PARTIAL ? EFX_TXQ_TYPE_OUTER_CSUM : 0;
        struct efx_ptp_data *ptp_data = efx->ptp_data;
+       u8 type = efx_tx_csum_type_skb(skb);
        struct efx_tx_queue *tx_queue;
 
        tx_queue = efx_channel_get_tx_queue(ptp_data->channel, type);
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index bb3b41f9f9c0..13e960b23de8 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -506,7 +506,7 @@ netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
        EFX_WARN_ON_PARANOID(!netif_device_present(net_dev));
 
        index = skb_get_queue_mapping(skb);
-       type = skb->ip_summed == CHECKSUM_PARTIAL ? EFX_TXQ_TYPE_OUTER_CSUM : 0;
+       type = efx_tx_csum_type_skb(skb);
        if (index >= efx->n_tx_channels) {
                index -= efx->n_tx_channels;
                type |= EFX_TXQ_TYPE_HIGHPRI;
diff --git a/drivers/net/ethernet/sfc/tx.h b/drivers/net/ethernet/sfc/tx.h
index a3cf06c5570d..f2c4d2f89919 100644
--- a/drivers/net/ethernet/sfc/tx.h
+++ b/drivers/net/ethernet/sfc/tx.h
@@ -18,4 +18,30 @@ unsigned int efx_tx_limit_len(struct efx_tx_queue *tx_queue,
 u8 *efx_tx_get_copy_buffer_limited(struct efx_tx_queue *tx_queue,
                                   struct efx_tx_buffer *buffer, size_t len);
 
+/* What TXQ type will satisfy the checksum offloads required for this skb? */
+static inline unsigned int efx_tx_csum_type_skb(struct sk_buff *skb)
+{
+       if (skb->ip_summed != CHECKSUM_PARTIAL)
+               return 0; /* no checksum offload */
+
+       if (skb->encapsulation &&
+           skb_checksum_start_offset(skb) == skb_inner_transport_offset(skb)) {
+               /* we only advertise features for IPv4 and IPv6 checksums on
+                * encapsulated packets, so if the checksum is for the inner
+                * packet, it must be one of them; no further checking required.
+                */
+
+               /* Do we also need to offload the outer header checksum? */
+               if (skb_shinfo(skb)->gso_segs > 1 &&
+                   !(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
+                   (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM))
+                       return EFX_TXQ_TYPE_OUTER_CSUM | EFX_TXQ_TYPE_INNER_CSUM;
+               return EFX_TXQ_TYPE_INNER_CSUM;
+       }
+
+       /* similarly, we only advertise features for IPv4 and IPv6 checksums,
+        * so it must be one of them. No need for further checks.
+        */
+       return EFX_TXQ_TYPE_OUTER_CSUM;
+}
 #endif /* EFX_TX_H */
