From: Oleksandr Kolomeiets <okl-...@napatech.com>

Add support for the rte_eth_dev_set_mtu API.

For the inline profile the MTU of a physical port can now be changed at
runtime through rte_eth_dev_set_mtu(). The driver programs the TX
fragmentation recipes of the TPE RPP_IFR and IFR modules, and the FLM
learn record carries the selected MTU recipe in bits [13:10] of the
color field. The accepted MTU range for the inline profile is
MIN_MTU_INLINE (512) to MAX_MTU. Per-port IPv4/IPv6 fragmentation
behaviour is configurable through the new macros in
flow_api_profile_inline_config.h.
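
A minimal usage sketch from the application side (illustrative only;
the port id and MTU value are placeholders, not part of this patch):

    #include <stdio.h>
    #include <rte_ethdev.h>

    /* Request a 1500 byte MTU on ethdev port 0. With the inline
     * profile this lands in dev_set_mtu_inline(), which programs the
     * TPE RPP_IFR/IFR recipes for the corresponding physical port.
     */
    int ret = rte_eth_dev_set_mtu(0, 1500);
    if (ret != 0)
            printf("setting MTU failed: %d\n", ret);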

Signed-off-by: Oleksandr Kolomeiets <okl-...@napatech.com>
---
 drivers/net/ntnic/include/flow_api_engine.h   |  7 ++
 drivers/net/ntnic/include/hw_mod_backend.h    |  4 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_tpe.c   | 96 +++++++++++++++++++
 .../profile_inline/flow_api_profile_inline.c  | 82 +++++++++++++++-
 .../profile_inline/flow_api_profile_inline.h  |  9 ++
 .../flow_api_profile_inline_config.h          | 50 ++++++++++
 drivers/net/ntnic/ntnic_ethdev.c              | 41 ++++++++
 drivers/net/ntnic/ntnic_mod_reg.h             |  5 +
 8 files changed, 292 insertions(+), 2 deletions(-)
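
Note on the FLM learn record color packing used below (a sketch of the
bit layout visible in the flow_api_profile_inline.c hunks; the variable
names here are illustrative):

    uint32_t color;
    color = rpl_ext_ptr & 0x3ff;           /* bits [9:0]: RPL EXT PTR */
    color |= (mtu_recipe & 0xf) << 10;     /* bits [13:10]: MTU recipe */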

diff --git a/drivers/net/ntnic/include/flow_api_engine.h b/drivers/net/ntnic/include/flow_api_engine.h
index 8604dde995..5eace2614f 100644
--- a/drivers/net/ntnic/include/flow_api_engine.h
+++ b/drivers/net/ntnic/include/flow_api_engine.h
@@ -280,6 +280,11 @@ struct nic_flow_def {
         * AGE action timeout
         */
        struct age_def_s age;
+
+       /*
+        * TX fragmentation IFR/RPP_LR MTU recipe
+        */
+       uint8_t flm_mtu_fragmentation_recipe;
 };
 
 enum flow_handle_type {
@@ -340,6 +345,8 @@ struct flow_handle {
                        uint8_t flm_qfi;
                        uint8_t flm_scrub_prof;
 
+                       uint8_t flm_mtu_fragmentation_recipe;
+
                        /* Flow specific pointer to application template table cell stored during
                         * flow create.
                         */
diff --git a/drivers/net/ntnic/include/hw_mod_backend.h b/drivers/net/ntnic/include/hw_mod_backend.h
index 7a36e4c6d6..f91a3ed058 100644
--- a/drivers/net/ntnic/include/hw_mod_backend.h
+++ b/drivers/net/ntnic/include/hw_mod_backend.h
@@ -958,8 +958,12 @@ int hw_mod_tpe_rpp_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field, i
        uint32_t value);
 
 int hw_mod_tpe_rpp_ifr_rcp_flush(struct flow_api_backend_s *be, int start_idx, int count);
+int hw_mod_tpe_rpp_ifr_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field, int index,
+       uint32_t value);
 
 int hw_mod_tpe_ifr_rcp_flush(struct flow_api_backend_s *be, int start_idx, int count);
+int hw_mod_tpe_ifr_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field, int index,
+       uint32_t value);
 
 int hw_mod_tpe_ins_rcp_flush(struct flow_api_backend_s *be, int start_idx, int count);
 int hw_mod_tpe_ins_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field, int index,
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe.c
index ba8f2d0dbb..2c3ed2355b 100644
--- a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe.c
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe.c
@@ -152,6 +152,54 @@ int hw_mod_tpe_rpp_ifr_rcp_flush(struct flow_api_backend_s *be, int start_idx, i
        return be->iface->tpe_rpp_ifr_rcp_flush(be->be_dev, &be->tpe, start_idx, count);
 }
 
+static int hw_mod_tpe_rpp_ifr_rcp_mod(struct flow_api_backend_s *be, enum hw_tpe_e field,
+       uint32_t index, uint32_t *value, int get)
+{
+       if (index >= be->tpe.nb_ifr_categories)
+               return INDEX_TOO_LARGE;
+
+       switch (_VER_) {
+       case 3:
+               switch (field) {
+               case HW_TPE_IFR_RCP_IPV4_EN:
+                       GET_SET(be->tpe.v3.rpp_ifr_rcp[index].ipv4_en, value);
+                       break;
+
+               case HW_TPE_IFR_RCP_IPV4_DF_DROP:
+                       GET_SET(be->tpe.v3.rpp_ifr_rcp[index].ipv4_df_drop, value);
+                       break;
+
+               case HW_TPE_IFR_RCP_IPV6_EN:
+                       GET_SET(be->tpe.v3.rpp_ifr_rcp[index].ipv6_en, value);
+                       break;
+
+               case HW_TPE_IFR_RCP_IPV6_DROP:
+                       GET_SET(be->tpe.v3.rpp_ifr_rcp[index].ipv6_drop, value);
+                       break;
+
+               case HW_TPE_IFR_RCP_MTU:
+                       GET_SET(be->tpe.v3.rpp_ifr_rcp[index].mtu, value);
+                       break;
+
+               default:
+                       return UNSUP_FIELD;
+               }
+
+               break;
+
+       default:
+               return UNSUP_VER;
+       }
+
+       return 0;
+}
+
+int hw_mod_tpe_rpp_ifr_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field, int index,
+       uint32_t value)
+{
+       return hw_mod_tpe_rpp_ifr_rcp_mod(be, field, index, &value, 0);
+}
+
 /*
  * RPP_RCP
  */
@@ -262,6 +310,54 @@ int hw_mod_tpe_ifr_rcp_flush(struct flow_api_backend_s *be, int start_idx, int c
        return be->iface->tpe_ifr_rcp_flush(be->be_dev, &be->tpe, start_idx, count);
 }
 
+static int hw_mod_tpe_ifr_rcp_mod(struct flow_api_backend_s *be, enum hw_tpe_e field,
+       uint32_t index, uint32_t *value, int get)
+{
+       if (index >= be->tpe.nb_ifr_categories)
+               return INDEX_TOO_LARGE;
+
+       switch (_VER_) {
+       case 3:
+               switch (field) {
+               case HW_TPE_IFR_RCP_IPV4_EN:
+                       GET_SET(be->tpe.v3.ifr_rcp[index].ipv4_en, value);
+                       break;
+
+               case HW_TPE_IFR_RCP_IPV4_DF_DROP:
+                       GET_SET(be->tpe.v3.ifr_rcp[index].ipv4_df_drop, value);
+                       break;
+
+               case HW_TPE_IFR_RCP_IPV6_EN:
+                       GET_SET(be->tpe.v3.ifr_rcp[index].ipv6_en, value);
+                       break;
+
+               case HW_TPE_IFR_RCP_IPV6_DROP:
+                       GET_SET(be->tpe.v3.ifr_rcp[index].ipv6_drop, value);
+                       break;
+
+               case HW_TPE_IFR_RCP_MTU:
+                       GET_SET(be->tpe.v3.ifr_rcp[index].mtu, value);
+                       break;
+
+               default:
+                       return UNSUP_FIELD;
+               }
+
+               break;
+
+       default:
+               return UNSUP_VER;
+       }
+
+       return 0;
+}
+
+int hw_mod_tpe_ifr_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field, int index,
+       uint32_t value)
+{
+       return hw_mod_tpe_ifr_rcp_mod(be, field, index, &value, 0);
+}
+
 /*
  * INS_RCP
  */
diff --git a/drivers/net/ntnic/nthw/flow_api/profile_inline/flow_api_profile_inline.c b/drivers/net/ntnic/nthw/flow_api/profile_inline/flow_api_profile_inline.c
index 89e7041350..42d4c19eaa 100644
--- a/drivers/net/ntnic/nthw/flow_api/profile_inline/flow_api_profile_inline.c
+++ b/drivers/net/ntnic/nthw/flow_api/profile_inline/flow_api_profile_inline.c
@@ -803,6 +803,11 @@ static inline void set_key_def_sw(struct flm_flow_key_def_s *key_def, unsigned i
        }
 }
 
+static inline uint8_t convert_port_to_ifr_mtu_recipe(uint32_t port)
+{
+       return port + 1;
+}
+
 static uint8_t get_port_from_port_id(const struct flow_nic_dev *ndev, uint32_t port_id)
 {
        struct flow_eth_dev *dev = ndev->eth_base;
@@ -1023,6 +1028,8 @@ static int flm_flow_programming(struct flow_handle *fh, uint32_t flm_op)
        learn_record->rqi = fh->flm_rqi;
        /* Lower 10 bits used for RPL EXT PTR */
        learn_record->color = fh->flm_rpl_ext_ptr & 0x3ff;
+       /* Bit [13:10] used for MTU recipe */
+       learn_record->color |= (fh->flm_mtu_fragmentation_recipe & 0xf) << 10;
 
        learn_record->ent = 0;
        learn_record->op = flm_op & 0xf;
@@ -1121,6 +1128,9 @@ static int interpret_flow_actions(const struct flow_eth_dev *dev,
                                fd->dst_id[fd->dst_num_avail].active = 1;
                                fd->dst_num_avail++;
 
+                               fd->flm_mtu_fragmentation_recipe =
+                                       convert_port_to_ifr_mtu_recipe(port);
+
                                if (fd->full_offload < 0)
                                        fd->full_offload = 1;
 
@@ -3070,6 +3080,8 @@ static void copy_fd_to_fh_flm(struct flow_handle *fh, const struct nic_flow_def
                        break;
                }
        }
+
+       fh->flm_mtu_fragmentation_recipe = fd->flm_mtu_fragmentation_recipe;
        fh->context = fd->age.context;
 }
 
@@ -3187,7 +3199,7 @@ static int setup_flow_flm_actions(struct flow_eth_dev *dev,
        /* Setup COT */
        struct hw_db_inline_cot_data cot_data = {
                .matcher_color_contrib = empty_pattern ? 0x0 : 0x4,     /* FT key C */
-               .frag_rcp = 0,
+               .frag_rcp = empty_pattern ? fd->flm_mtu_fragmentation_recipe : 0,
        };
        struct hw_db_cot_idx cot_idx =
                hw_db_inline_cot_add(dev->ndev, dev->ndev->hw_db_handle, &cot_data);
@@ -3501,7 +3513,7 @@ static struct flow_handle *create_flow_filter(struct flow_eth_dev *dev, struct n
                        /* Setup COT */
                        struct hw_db_inline_cot_data cot_data = {
                                .matcher_color_contrib = 0,
-                               .frag_rcp = 0,
+                               .frag_rcp = fd->flm_mtu_fragmentation_recipe,
                        };
                        struct hw_db_cot_idx cot_idx =
                                hw_db_inline_cot_add(dev->ndev, dev->ndev->hw_db_handle,
@@ -5416,6 +5428,67 @@ int flow_get_flm_stats_profile_inline(struct flow_nic_dev *ndev, uint64_t *data,
        return 0;
 }
 
+int flow_set_mtu_inline(struct flow_eth_dev *dev, uint32_t port, uint16_t mtu)
+{
+       if (port >= 255)
+               return -1;
+
+       uint32_t ipv4_en_frag;
+       uint32_t ipv4_action;
+       uint32_t ipv6_en_frag;
+       uint32_t ipv6_action;
+
+       if (port == 0) {
+               ipv4_en_frag = PORT_0_IPV4_FRAGMENTATION;
+               ipv4_action = PORT_0_IPV4_DF_ACTION;
+               ipv6_en_frag = PORT_0_IPV6_FRAGMENTATION;
+               ipv6_action = PORT_0_IPV6_ACTION;
+
+       } else if (port == 1) {
+               ipv4_en_frag = PORT_1_IPV4_FRAGMENTATION;
+               ipv4_action = PORT_1_IPV4_DF_ACTION;
+               ipv6_en_frag = PORT_1_IPV6_FRAGMENTATION;
+               ipv6_action = PORT_1_IPV6_ACTION;
+
+       } else {
+               ipv4_en_frag = DISABLE_FRAGMENTATION;
+               ipv4_action = IPV4_DF_DROP;
+               ipv6_en_frag = DISABLE_FRAGMENTATION;
+               ipv6_action = IPV6_DROP;
+       }
+
+       int err = 0;
+       uint8_t ifr_mtu_recipe = convert_port_to_ifr_mtu_recipe(port);
+       struct flow_nic_dev *ndev = dev->ndev;
+
+       err |= hw_mod_tpe_rpp_ifr_rcp_set(&ndev->be, HW_TPE_IFR_RCP_IPV4_EN, ifr_mtu_recipe,
+                       ipv4_en_frag);
+       err |= hw_mod_tpe_rpp_ifr_rcp_set(&ndev->be, HW_TPE_IFR_RCP_IPV6_EN, ifr_mtu_recipe,
+                       ipv6_en_frag);
+       err |= hw_mod_tpe_rpp_ifr_rcp_set(&ndev->be, HW_TPE_IFR_RCP_MTU, ifr_mtu_recipe, mtu);
+       err |= hw_mod_tpe_rpp_ifr_rcp_set(&ndev->be, HW_TPE_IFR_RCP_IPV4_DF_DROP, ifr_mtu_recipe,
+                       ipv4_action);
+       err |= hw_mod_tpe_rpp_ifr_rcp_set(&ndev->be, HW_TPE_IFR_RCP_IPV6_DROP, ifr_mtu_recipe,
+                       ipv6_action);
+
+       err |= hw_mod_tpe_ifr_rcp_set(&ndev->be, HW_TPE_IFR_RCP_IPV4_EN, ifr_mtu_recipe,
+                       ipv4_en_frag);
+       err |= hw_mod_tpe_ifr_rcp_set(&ndev->be, HW_TPE_IFR_RCP_IPV6_EN, ifr_mtu_recipe,
+                       ipv6_en_frag);
+       err |= hw_mod_tpe_ifr_rcp_set(&ndev->be, HW_TPE_IFR_RCP_MTU, ifr_mtu_recipe, mtu);
+       err |= hw_mod_tpe_ifr_rcp_set(&ndev->be, HW_TPE_IFR_RCP_IPV4_DF_DROP, ifr_mtu_recipe,
+                       ipv4_action);
+       err |= hw_mod_tpe_ifr_rcp_set(&ndev->be, HW_TPE_IFR_RCP_IPV6_DROP, ifr_mtu_recipe,
+                       ipv6_action);
+
+       if (err == 0) {
+               err |= hw_mod_tpe_rpp_ifr_rcp_flush(&ndev->be, ifr_mtu_recipe, 1);
+               err |= hw_mod_tpe_ifr_rcp_flush(&ndev->be, ifr_mtu_recipe, 1);
+       }
+
+       return err;
+}
+
 int flow_info_get_profile_inline(struct flow_eth_dev *dev, uint8_t caller_id,
        struct rte_flow_port_info *port_info,
        struct rte_flow_queue_info *queue_info, struct rte_flow_error *error)
@@ -6000,6 +6073,11 @@ static const struct profile_inline_ops ops = {
        .flm_free_queues = flm_free_queues,
        .flm_mtr_read_stats = flm_mtr_read_stats,
        .flm_update = flm_update,
+
+       /*
+        * Config API
+        */
+       .flow_set_mtu_inline = flow_set_mtu_inline,
 };
 
 void profile_inline_init(void)
diff --git a/drivers/net/ntnic/nthw/flow_api/profile_inline/flow_api_profile_inline.h b/drivers/net/ntnic/nthw/flow_api/profile_inline/flow_api_profile_inline.h
index 0dc89085ec..ce1a0669ee 100644
--- a/drivers/net/ntnic/nthw/flow_api/profile_inline/flow_api_profile_inline.h
+++ b/drivers/net/ntnic/nthw/flow_api/profile_inline/flow_api_profile_inline.h
@@ -11,6 +11,10 @@
 #include "flow_api.h"
 #include "stream_binary_flow_api.h"
 
+#define DISABLE_FRAGMENTATION 0
+#define IPV4_DF_DROP 1
+#define IPV6_DROP 1
+
 /*
  * Management
  */
@@ -120,4 +124,9 @@ int flow_configure_profile_inline(struct flow_eth_dev *dev, uint8_t caller_id,
        const struct rte_flow_queue_attr *queue_attr[],
        struct rte_flow_error *error);
 
+/*
+ * Config API
+ */
+int flow_set_mtu_inline(struct flow_eth_dev *dev, uint32_t port, uint16_t mtu);
+
 #endif /* _FLOW_API_PROFILE_INLINE_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/profile_inline/flow_api_profile_inline_config.h b/drivers/net/ntnic/nthw/flow_api/profile_inline/flow_api_profile_inline_config.h
index 3b53288ddf..c665cab16a 100644
--- a/drivers/net/ntnic/nthw/flow_api/profile_inline/flow_api_profile_inline_config.h
+++ b/drivers/net/ntnic/nthw/flow_api/profile_inline/flow_api_profile_inline_config.h
@@ -6,6 +6,56 @@
 #ifndef _FLOW_API_PROFILE_INLINE_CONFIG_H_
 #define _FLOW_API_PROFILE_INLINE_CONFIG_H_
 
+/*
+ * Per port configuration for IPv4 fragmentation and DF flag handling
+ *
+ * ||-------------------------------------||-------------------------||----------||
+ * ||            Configuration            ||   Egress packet type    ||          ||
+ * ||-------------------------------------||-------------------------|| Action   ||
+ * || IPV4_FRAGMENTATION | IPV4_DF_ACTION || Exceeding MTU | DF flag ||          ||
+ * ||-------------------------------------||-------------------------||----------||
+ * ||   DISABLE          |    -           ||      -        |    -    || Forward  ||
+ * ||-------------------------------------||-------------------------||----------||
+ * ||   ENABLE           |    DF_DROP     ||      no       |    -    || Forward  ||
+ * ||                    |                ||      yes      |    0    || Fragment ||
+ * ||                    |                ||      yes      |    1    || Drop     ||
+ * ||-------------------------------------||-------------------------||----------||
+ * ||   ENABLE           |    DF_FORWARD  ||      no       |    -    || Forward  ||
+ * ||                    |                ||      yes      |    0    || Fragment ||
+ * ||                    |                ||      yes      |    1    || Forward  ||
+ * ||-------------------------------------||-------------------------||----------||
+ */
+
+#define PORT_0_IPV4_FRAGMENTATION DISABLE_FRAGMENTATION
+#define PORT_0_IPV4_DF_ACTION IPV4_DF_DROP
+
+#define PORT_1_IPV4_FRAGMENTATION DISABLE_FRAGMENTATION
+#define PORT_1_IPV4_DF_ACTION IPV4_DF_DROP
+
+/*
+ * Per port configuration for IPv6 fragmentation
+ *
+ * ||-------------------------------------||-------------------------||----------||
+ * ||            Configuration            ||   Egress packet type    ||          ||
+ * ||-------------------------------------||-------------------------|| Action   ||
+ * || IPV6_FRAGMENTATION | IPV6_ACTION    || Exceeding MTU           ||          ||
+ * ||-------------------------------------||-------------------------||----------||
+ * ||   DISABLE          |    -           ||      -                  || Forward  ||
+ * ||-------------------------------------||-------------------------||----------||
+ * ||   ENABLE           |    DROP        ||      no                 || Forward  ||
+ * ||                    |                ||      yes                || Drop     ||
+ * ||-------------------------------------||-------------------------||----------||
+ * ||   ENABLE           |    FRAGMENT    ||      no                 || Forward  ||
+ * ||                    |                ||      yes                || Fragment ||
+ * ||-------------------------------------||-------------------------||----------||
+ */
+
+#define PORT_0_IPV6_FRAGMENTATION DISABLE_FRAGMENTATION
+#define PORT_0_IPV6_ACTION IPV6_DROP
+
+#define PORT_1_IPV6_FRAGMENTATION DISABLE_FRAGMENTATION
+#define PORT_1_IPV6_ACTION IPV6_DROP
+
 /*
  * Statistics are generated each time the byte counter crosses a limit.
  * If BYTE_LIMIT is zero then the byte counter does not trigger statistics
diff --git a/drivers/net/ntnic/ntnic_ethdev.c b/drivers/net/ntnic/ntnic_ethdev.c
index 77436eb02d..2a2643a106 100644
--- a/drivers/net/ntnic/ntnic_ethdev.c
+++ b/drivers/net/ntnic/ntnic_ethdev.c
@@ -39,6 +39,7 @@ const rte_thread_attr_t thread_attr = { .priority = RTE_THREAD_PRIORITY_NORMAL }
 #define THREAD_RETURN (0)
 #define HW_MAX_PKT_LEN (10000)
 #define MAX_MTU (HW_MAX_PKT_LEN - RTE_ETHER_HDR_LEN - RTE_ETHER_CRC_LEN)
+#define MIN_MTU_INLINE 512
 
 #define EXCEPTION_PATH_HID 0
 
@@ -70,6 +71,8 @@ const rte_thread_attr_t thread_attr = { .priority = RTE_THREAD_PRIORITY_NORMAL }
 #define MAX_RX_PACKETS   128
 #define MAX_TX_PACKETS   128
 
+#define MTUINITVAL 1500
+
 uint64_t rte_tsc_freq;
 
 static void (*previous_handler)(int sig);
@@ -338,6 +341,7 @@ eth_dev_infos_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *dev_info
        dev_info->max_mtu = MAX_MTU;
 
        if (p_adapter_info->fpga_info.profile == FPGA_INFO_PROFILE_INLINE) {
+               dev_info->min_mtu = MIN_MTU_INLINE;
                dev_info->flow_type_rss_offloads = NT_ETH_RSS_OFFLOAD_MASK;
                dev_info->hash_key_size = MAX_RSS_KEY_LEN;
 
@@ -1149,6 +1153,26 @@ static int eth_tx_scg_queue_setup(struct rte_eth_dev *eth_dev,
        return 0;
 }
 
+static int dev_set_mtu_inline(struct rte_eth_dev *eth_dev, uint16_t mtu)
+{
+       const struct profile_inline_ops *profile_inline_ops = get_profile_inline_ops();
+
+       if (profile_inline_ops == NULL) {
+               NT_LOG_DBGX(ERR, NTNIC, "profile_inline module uninitialized");
+               return -1;
+       }
+
+       struct pmd_internals *internals = (struct pmd_internals *)eth_dev->data->dev_private;
+
+       struct flow_eth_dev *flw_dev = internals->flw_dev;
+       int ret = -1;
+
+       if (internals->type == PORT_TYPE_PHYSICAL && mtu >= MIN_MTU_INLINE && mtu <= MAX_MTU)
+               ret = profile_inline_ops->flow_set_mtu_inline(flw_dev, internals->port, mtu);
+
+       return ret ? -EINVAL : 0;
+}
+
 static int eth_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
 {
        eth_dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
@@ -1714,6 +1738,7 @@ static struct eth_dev_ops nthw_eth_dev_ops = {
        .xstats_reset = eth_xstats_reset,
        .xstats_get_by_id = eth_xstats_get_by_id,
        .xstats_get_names_by_id = eth_xstats_get_names_by_id,
+       .mtu_set = NULL,
        .promiscuous_enable = promiscuous_enable,
        .rss_hash_update = eth_dev_rss_hash_update,
        .rss_hash_conf_get = rss_hash_conf_get,
@@ -2277,6 +2302,7 @@ nthw_pci_dev_init(struct rte_pci_device *pci_dev)
                internals->pci_dev = pci_dev;
                internals->n_intf_no = n_intf_no;
                internals->type = PORT_TYPE_PHYSICAL;
+               internals->port = n_intf_no;
                internals->nb_rx_queues = nb_rx_queues;
                internals->nb_tx_queues = nb_tx_queues;
 
@@ -2386,6 +2412,21 @@ nthw_pci_dev_init(struct rte_pci_device *pci_dev)
                /* increase initialized ethernet devices - PF */
                p_drv->n_eth_dev_init_count++;
 
+               if (get_flow_filter_ops() != NULL) {
+                       if (fpga_info->profile == FPGA_INFO_PROFILE_INLINE &&
+                               internals->flw_dev->ndev->be.tpe.ver >= 2) {
+                               assert(nthw_eth_dev_ops.mtu_set == dev_set_mtu_inline ||
+                                       nthw_eth_dev_ops.mtu_set == NULL);
+                               nthw_eth_dev_ops.mtu_set = dev_set_mtu_inline;
+                               dev_set_mtu_inline(eth_dev, MTUINITVAL);
+                               NT_LOG_DBGX(DBG, NTNIC, "INLINE MTU supported, tpe version %d",
+                                       internals->flw_dev->ndev->be.tpe.ver);
+
+                       } else {
+                               NT_LOG(DBG, NTNIC, "INLINE MTU not supported");
+                       }
+               }
+
                /* Port event thread */
                if (fpga_info->profile == FPGA_INFO_PROFILE_INLINE) {
                        res = THREAD_CTRL_CREATE(&p_nt_drv->port_event_thread, "nt_port_event_thr",
diff --git a/drivers/net/ntnic/ntnic_mod_reg.h b/drivers/net/ntnic/ntnic_mod_reg.h
index eb764356eb..71861c6dea 100644
--- a/drivers/net/ntnic/ntnic_mod_reg.h
+++ b/drivers/net/ntnic/ntnic_mod_reg.h
@@ -408,6 +408,11 @@ struct profile_inline_ops {
        const struct rte_flow_port_attr *port_attr, uint16_t nb_queue,
        const struct rte_flow_queue_attr *queue_attr[],
        struct rte_flow_error *error);
+
+       /*
+        * Config API
+        */
+       int (*flow_set_mtu_inline)(struct flow_eth_dev *dev, uint32_t port, uint16_t mtu);
 };
 
 void register_profile_inline_ops(const struct profile_inline_ops *ops);
-- 
2.45.0
