Add ice support for new ethdev APIs to enable/disable and read/write/adjust
IEEE1588 PTP timestamps. Currently, only the scalar path supports 1588 PTP;
the vector path doesn't.

The example command for running ptpclient is as below:
./dpdk-ptpclient -c 1 -n 3 --force-max-simd-bitwidth=64 -- -T 0 -p 0x1

Signed-off-by: Simei Su <simei...@intel.com>

---
v2:
* Change patchset to one patch based on shared code update.
* Change per-device offload to per-queue offload.

 drivers/net/ice/ice_ethdev.c | 228 ++++++++++++++++++++++++++++++++++++++++++-
 drivers/net/ice/ice_ethdev.h |   5 +
 drivers/net/ice/ice_rxtx.c   |  35 ++++++-
 drivers/net/ice/ice_rxtx.h   |   1 +
 4 files changed, 266 insertions(+), 3 deletions(-)

diff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c
index 8d62b84..3dc7d40 100644
--- a/drivers/net/ice/ice_ethdev.c
+++ b/drivers/net/ice/ice_ethdev.c
@@ -18,6 +18,7 @@
 #include "base/ice_flow.h"
 #include "base/ice_dcb.h"
 #include "base/ice_common.h"
+#include "base/ice_ptp_hw.h"
 
 #include "rte_pmd_ice.h"
 #include "ice_ethdev.h"
@@ -30,6 +31,8 @@
 #define ICE_PROTO_XTR_ARG         "proto_xtr"
 #define ICE_HW_DEBUG_MASK_ARG     "hw_debug_mask"
 
+#define ICE_CYCLECOUNTER_MASK  0xffffffffffffffffULL
+
 static const char * const ice_valid_args[] = {
        ICE_SAFE_MODE_SUPPORT_ARG,
        ICE_PIPELINE_MODE_SUPPORT_ARG,
@@ -141,6 +144,18 @@ static int ice_dev_udp_tunnel_port_add(struct rte_eth_dev 
*dev,
                        struct rte_eth_udp_tunnel *udp_tunnel);
 static int ice_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
                        struct rte_eth_udp_tunnel *udp_tunnel);
+static int ice_timesync_enable(struct rte_eth_dev *dev);
+static int ice_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
+                                         struct timespec *timestamp,
+                                         uint32_t flags);
+static int ice_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
+                                         struct timespec *timestamp);
+static int ice_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta);
+static int ice_timesync_read_time(struct rte_eth_dev *dev,
+                                 struct timespec *timestamp);
+static int ice_timesync_write_time(struct rte_eth_dev *dev,
+                                  const struct timespec *timestamp);
+static int ice_timesync_disable(struct rte_eth_dev *dev);
 
 static const struct rte_pci_id pci_id_ice_map[] = {
        { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823L_BACKPLANE) },
@@ -224,6 +239,13 @@ static const struct eth_dev_ops ice_eth_dev_ops = {
        .udp_tunnel_port_del          = ice_dev_udp_tunnel_port_del,
        .tx_done_cleanup              = ice_tx_done_cleanup,
        .get_monitor_addr             = ice_get_monitor_addr,
+       .timesync_enable              = ice_timesync_enable,
+       .timesync_read_rx_timestamp   = ice_timesync_read_rx_timestamp,
+       .timesync_read_tx_timestamp   = ice_timesync_read_tx_timestamp,
+       .timesync_adjust_time         = ice_timesync_adjust_time,
+       .timesync_read_time           = ice_timesync_read_time,
+       .timesync_write_time          = ice_timesync_write_time,
+       .timesync_disable             = ice_timesync_disable,
 };
 
 /* store statistics names and its offset in stats structure */
@@ -3475,7 +3497,8 @@ ice_dev_info_get(struct rte_eth_dev *dev, struct 
rte_eth_dev_info *dev_info)
                        DEV_RX_OFFLOAD_QINQ_STRIP |
                        DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
                        DEV_RX_OFFLOAD_VLAN_EXTEND |
-                       DEV_RX_OFFLOAD_RSS_HASH;
+                       DEV_RX_OFFLOAD_RSS_HASH |
+                       DEV_RX_OFFLOAD_TIMESTAMP;
                dev_info->tx_offload_capa |=
                        DEV_TX_OFFLOAD_QINQ_INSERT |
                        DEV_TX_OFFLOAD_IPV4_CKSUM |
@@ -3487,7 +3510,7 @@ ice_dev_info_get(struct rte_eth_dev *dev, struct 
rte_eth_dev_info *dev_info)
                dev_info->flow_type_rss_offloads |= ICE_RSS_OFFLOAD_ALL;
        }
 
-       dev_info->rx_queue_offload_capa = 0;
+       dev_info->rx_queue_offload_capa = DEV_RX_OFFLOAD_TIMESTAMP;
        dev_info->tx_queue_offload_capa = DEV_TX_OFFLOAD_MBUF_FAST_FREE;
 
        dev_info->reta_size = pf->hash_lut_size;
@@ -5287,6 +5310,207 @@ ice_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
 }
 
 static int
+ice_timesync_enable(struct rte_eth_dev *dev)
+{
+       struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct ice_adapter *ad =
+                       ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+       int ret;
+
+       if (hw->func_caps.ts_func_info.src_tmr_owned) {
+               ret = ice_ptp_init_phc(hw);
+               if (ret) {
+                       PMD_DRV_LOG(ERR, "Failed to initialize PHC\n");
+                       return -1;
+               }
+
+               ret = ice_ptp_write_incval(hw, ICE_PTP_NOMINAL_INCVAL_E810);
+               if (ret) {
+                       PMD_DRV_LOG(ERR,
+                               "Failed to write PHC increment time value\n");
+                       return -1;
+               }
+       }
+
+       /* Initialize cycle counters for system time/RX/TX timestamp */
+       memset(&ad->systime_tc, 0, sizeof(struct rte_timecounter));
+       memset(&ad->rx_tstamp_tc, 0, sizeof(struct rte_timecounter));
+       memset(&ad->tx_tstamp_tc, 0, sizeof(struct rte_timecounter));
+
+       ad->systime_tc.cc_mask = ICE_CYCLECOUNTER_MASK;
+       ad->systime_tc.cc_shift = 0;
+       ad->systime_tc.nsec_mask = 0;
+
+       ad->rx_tstamp_tc.cc_mask = ICE_CYCLECOUNTER_MASK;
+       ad->rx_tstamp_tc.cc_shift = 0;
+       ad->rx_tstamp_tc.nsec_mask = 0;
+
+       ad->tx_tstamp_tc.cc_mask = ICE_CYCLECOUNTER_MASK;
+       ad->tx_tstamp_tc.cc_shift = 0;
+       ad->tx_tstamp_tc.nsec_mask = 0;
+
+       return 0;
+}
+
+static uint64_t
+ice_read_time(struct ice_hw *hw)
+{
+       uint32_t hi, lo, lo2;
+       uint64_t time;
+
+       lo = ICE_READ_REG(hw, GLTSYN_TIME_L(0));
+       hi = ICE_READ_REG(hw, GLTSYN_TIME_H(0));
+       lo2 = ICE_READ_REG(hw, GLTSYN_TIME_L(0));
+
+       if (lo2 < lo) {
+               lo = ICE_READ_REG(hw, GLTSYN_TIME_L(0));
+               hi = ICE_READ_REG(hw, GLTSYN_TIME_H(0));
+       }
+
+       time = ((uint64_t)hi << 32) | lo;
+
+       return time;
+}
+
+static uint64_t
+ice_tstamp_convert_32b_64b(uint64_t time, uint64_t timestamp)
+{
+       const uint64_t mask = 0xFFFFFFFF;
+       uint32_t delta;
+       uint64_t ns;
+
+       delta = (timestamp - (uint32_t)(time & mask));
+
+       if (delta > (mask / 2)) {
+               delta = ((uint32_t)(time & mask) - timestamp);
+               ns = time - delta;
+       } else {
+               ns = time + delta;
+       }
+
+       return ns;
+}
+
+static int
+ice_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
+                              struct timespec *timestamp, uint32_t flags)
+{
+       struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct ice_adapter *ad =
+                       ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+       struct ice_rx_queue *rxq;
+       uint32_t ts_high;
+       uint64_t time, ts_ns, ns;
+
+       rxq = dev->data->rx_queues[flags];
+
+       time = ice_read_time(hw);
+
+       ts_high = rxq->time_high;
+       ts_ns = ice_tstamp_convert_32b_64b(time, ts_high);
+       ns = rte_timecounter_update(&ad->rx_tstamp_tc, ts_ns);
+       *timestamp = rte_ns_to_timespec(ns);
+
+       return 0;
+}
+
+static int
+ice_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
+                              struct timespec *timestamp)
+{
+       struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct ice_adapter *ad =
+                       ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+       uint8_t lport;
+       uint64_t time, ts_ns, ns, tstamp;
+       const uint64_t mask = 0xFFFFFFFF;
+       int ret;
+
+       lport = hw->port_info->lport;
+
+       ret = ice_read_phy_tstamp(hw, lport, 0, &tstamp);
+       if (ret) {
+               PMD_DRV_LOG(ERR, "Failed to read phy timestamp\n");
+               return -1;
+       }
+
+       time = ice_read_time(hw);
+
+       ts_ns = ice_tstamp_convert_32b_64b(time, (tstamp >> 8) & mask);
+       ns = rte_timecounter_update(&ad->tx_tstamp_tc, ts_ns);
+       *timestamp = rte_ns_to_timespec(ns);
+
+       return 0;
+}
+
+static int
+ice_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta)
+{
+       struct ice_adapter *ad =
+                       ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+
+       ad->systime_tc.nsec += delta;
+       ad->rx_tstamp_tc.nsec += delta;
+       ad->tx_tstamp_tc.nsec += delta;
+
+       return 0;
+}
+
+static int
+ice_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts)
+{
+       struct ice_adapter *ad =
+                       ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+       uint64_t ns;
+
+       ns = rte_timespec_to_ns(ts);
+
+       ad->systime_tc.nsec = ns;
+       ad->rx_tstamp_tc.nsec = ns;
+       ad->tx_tstamp_tc.nsec = ns;
+
+       return 0;
+}
+
+static int
+ice_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts)
+{
+       struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct ice_adapter *ad =
+                       ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+       uint64_t time, ns;
+
+       time = ice_read_time(hw);
+       ns = rte_timecounter_update(&ad->systime_tc, time);
+       *ts = rte_ns_to_timespec(ns);
+
+       return 0;
+}
+
+static int
+ice_timesync_disable(struct rte_eth_dev *dev)
+{
+       struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       uint64_t val;
+       uint8_t lport;
+
+       lport = hw->port_info->lport;
+
+       dev->data->dev_conf.rxmode.offloads &= ~DEV_RX_OFFLOAD_TIMESTAMP;
+
+       ice_clear_phy_tstamp(hw, lport, 0);
+
+       val = ICE_READ_REG(hw, GLTSYN_ENA(0));
+       val &= ~GLTSYN_ENA_TSYN_ENA_M;
+       ICE_WRITE_REG(hw, GLTSYN_ENA(0), val);
+
+       ICE_WRITE_REG(hw, GLTSYN_INCVAL_L(0), 0);
+       ICE_WRITE_REG(hw, GLTSYN_INCVAL_H(0), 0);
+
+       return 0;
+}
+
+static int
 ice_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
              struct rte_pci_device *pci_dev)
 {
diff --git a/drivers/net/ice/ice_ethdev.h b/drivers/net/ice/ice_ethdev.h
index b4bf651..1c7c8ea 100644
--- a/drivers/net/ice/ice_ethdev.h
+++ b/drivers/net/ice/ice_ethdev.h
@@ -6,6 +6,7 @@
 #define _ICE_ETHDEV_H_
 
 #include <rte_kvargs.h>
+#include <rte_time.h>
 
 #include <ethdev_driver.h>
 
@@ -486,6 +487,10 @@ struct ice_adapter {
        struct ice_devargs devargs;
        enum ice_pkg_type active_pkg_type; /* loaded ddp package type */
        uint16_t fdir_ref_cnt;
+       /* For PTP */
+       struct rte_timecounter systime_tc;
+       struct rte_timecounter rx_tstamp_tc;
+       struct rte_timecounter tx_tstamp_tc;
 #ifdef RTE_ARCH_X86
        bool rx_use_avx2;
        bool rx_use_avx512;
diff --git a/drivers/net/ice/ice_rxtx.c b/drivers/net/ice/ice_rxtx.c
index 5d7ab4f..6b6149f 100644
--- a/drivers/net/ice/ice_rxtx.c
+++ b/drivers/net/ice/ice_rxtx.c
@@ -354,6 +354,10 @@ ice_program_hw_rx_queue(struct ice_rx_queue *rxq)
        regval |= (0x03 << QRXFLXP_CNTXT_RXDID_PRIO_S) &
                QRXFLXP_CNTXT_RXDID_PRIO_M;
 
+       /* Enable timestamp bit in the queue context */
+       if (rxq->offloads & DEV_RX_OFFLOAD_TIMESTAMP)
+               regval |= QRXFLXP_CNTXT_TS_M;
+
        ICE_WRITE_REG(hw, QRXFLXP_CNTXT(rxq->reg_idx), regval);
 
        err = ice_clear_rxq_ctx(hw, rxq->reg_idx);
@@ -689,6 +693,7 @@ ice_tx_queue_start(struct rte_eth_dev *dev, uint16_t 
tx_queue_id)
        tx_ctx.tso_ena = 1; /* tso enable */
        tx_ctx.tso_qnum = txq->reg_idx; /* index for tso state structure */
        tx_ctx.legacy_int = 1; /* Legacy or Advanced Host Interface */
+       tx_ctx.tsyn_ena = 1;
 
        ice_set_ctx(hw, (uint8_t *)&tx_ctx, txq_elem->txqs[0].txq_ctx,
                    ice_tlan_ctx_info);
@@ -1589,6 +1594,13 @@ ice_rx_scan_hw_ring(struct ice_rx_queue *rxq)
                        ice_rxd_to_vlan_tci(mb, &rxdp[j]);
                        rxq->rxd_to_pkt_fields(rxq, mb, &rxdp[j]);
 
+                       if (rxq->offloads & DEV_RX_OFFLOAD_TIMESTAMP) {
+                               rxq->time_high =
+                                  rte_le_to_cpu_32(rxdp[j].wb.flex_ts.ts_high);
+                               mb->timesync = rxq->queue_id;
+                               pkt_flags |= PKT_RX_IEEE1588_PTP;
+                       }
+
                        mb->ol_flags |= pkt_flags;
                }
 
@@ -1882,6 +1894,13 @@ ice_recv_scattered_pkts(void *rx_queue,
                ice_rxd_to_vlan_tci(first_seg, &rxd);
                rxq->rxd_to_pkt_fields(rxq, first_seg, &rxd);
                pkt_flags = ice_rxd_error_to_pkt_flags(rx_stat_err0);
+
+               if (rxq->offloads & DEV_RX_OFFLOAD_TIMESTAMP) {
+                       rxq->time_high = rxd.wb.flex_ts.ts_high;
+                       first_seg->timesync = rxq->queue_id;
+                       pkt_flags |= PKT_RX_IEEE1588_PTP;
+               }
+
                first_seg->ol_flags |= pkt_flags;
                /* Prefetch data of first segment, if configured to do so. */
                rte_prefetch0(RTE_PTR_ADD(first_seg->buf_addr,
@@ -2288,6 +2307,13 @@ ice_recv_pkts(void *rx_queue,
                ice_rxd_to_vlan_tci(rxm, &rxd);
                rxq->rxd_to_pkt_fields(rxq, rxm, &rxd);
                pkt_flags = ice_rxd_error_to_pkt_flags(rx_stat_err0);
+
+               if (rxq->offloads & DEV_RX_OFFLOAD_TIMESTAMP) {
+                       rxq->time_high = rxd.wb.flex_ts.ts_high;
+                       rxm->timesync = rxq->queue_id;
+                       pkt_flags |= PKT_RX_IEEE1588_PTP;
+               }
+
                rxm->ol_flags |= pkt_flags;
                /* copy old mbuf to rx_pkts */
                rx_pkts[nb_rx++] = rxm;
@@ -2499,7 +2525,8 @@ ice_calc_context_desc(uint64_t flags)
        static uint64_t mask = PKT_TX_TCP_SEG |
                PKT_TX_QINQ |
                PKT_TX_OUTER_IP_CKSUM |
-               PKT_TX_TUNNEL_MASK;
+               PKT_TX_TUNNEL_MASK |
+               PKT_TX_IEEE1588_TMST;
 
        return (flags & mask) ? 1 : 0;
 }
@@ -2667,6 +2694,12 @@ ice_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, 
uint16_t nb_pkts)
                        if (ol_flags & PKT_TX_TCP_SEG)
                                cd_type_cmd_tso_mss |=
                                        ice_set_tso_ctx(tx_pkt, tx_offload);
+                       else {
+                               if (ol_flags & PKT_TX_IEEE1588_TMST)
+                                       cd_type_cmd_tso_mss |=
+                                          ((uint64_t)ICE_TX_CTX_DESC_TSYN <<
+                                           ICE_TXD_CTX_QW1_CMD_S);
+                       }
 
                        ctx_txd->tunneling_params =
                                rte_cpu_to_le_32(cd_tunneling_params);
diff --git a/drivers/net/ice/ice_rxtx.h b/drivers/net/ice/ice_rxtx.h
index b10db08..ae0b436 100644
--- a/drivers/net/ice/ice_rxtx.h
+++ b/drivers/net/ice/ice_rxtx.h
@@ -89,6 +89,7 @@ struct ice_rx_queue {
        ice_rxd_to_pkt_fields_t rxd_to_pkt_fields; /* handle FlexiMD by RXDID */
        ice_rx_release_mbufs_t rx_rel_mbufs;
        uint64_t offloads;
+       uint32_t time_high; /* High value of the timestamp */
 };
 
 struct ice_tx_entry {
-- 
2.9.5

Reply via email to