Add code to allocate VSI queue contexts in order to save queue-specific
information such as bandwidth.
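
The pattern is a per-TC grow-on-demand array: the first call for a TC
allocates new_numqs contexts, and a later call that asks for more queues
reallocates the array and copies the previously saved entries over. A
simplified, standalone sketch of that bookkeeping follows (names mirror
the patch, but the ice_calloc()/ice_free() OSAL wrappers are replaced
with plain calloc()/free() so the sketch builds on its own; it is an
illustration, not part of the change):

  #include <stdint.h>
  #include <stdlib.h>
  #include <string.h>

  #define MAX_TC 8                 /* stands in for ICE_MAX_TRAFFIC_CLASS */

  struct q_ctx {                   /* mirrors struct ice_q_ctx */
      uint16_t q_handle;
  };

  struct vsi_ctx {                 /* relevant subset of struct ice_vsi_ctx */
      uint16_t num_lan_q_entries[MAX_TC];
      struct q_ctx *lan_q_ctx[MAX_TC];
  };

  /* Allocate, or grow, the LAN queue context array of one TC while keeping
   * the previously saved per-queue state - the same flow as the new
   * ice_alloc_lan_q_ctx() helper. Returns 0 on success, -1 on failure.
   */
  static int alloc_lan_q_ctx(struct vsi_ctx *vsi, uint8_t tc, uint16_t new_numqs)
  {
      struct q_ctx *q_ctx;

      if (!vsi->lan_q_ctx[tc]) {
          /* first request for this TC: allocate the whole array */
          vsi->lan_q_ctx[tc] = calloc(new_numqs, sizeof(*q_ctx));
          if (!vsi->lan_q_ctx[tc])
              return -1;
          vsi->num_lan_q_entries[tc] = new_numqs;
          return 0;
      }
      if (new_numqs > vsi->num_lan_q_entries[tc]) {
          /* more queues than before: grow and preserve existing contexts */
          uint16_t prev_num = vsi->num_lan_q_entries[tc];

          q_ctx = calloc(new_numqs, sizeof(*q_ctx));
          if (!q_ctx)
              return -1;
          memcpy(q_ctx, vsi->lan_q_ctx[tc], prev_num * sizeof(*q_ctx));
          free(vsi->lan_q_ctx[tc]);
          vsi->lan_q_ctx[tc] = q_ctx;
          vsi->num_lan_q_entries[tc] = new_numqs;
      }
      return 0;
  }

The arrays are kept per traffic class so that the new ice_clear_vsi_q_ctx()
can release each TC's contexts independently when the VSI context is
cleared.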

Signed-off-by: Victor Raj <victor....@intel.com>
Signed-off-by: Paul M Stillwell Jr <paul.m.stillwell...@intel.com>
Signed-off-by: Qi Zhang <qi.z.zh...@intel.com>
---
 drivers/net/ice/base/ice_sched.c  | 52 +++++++++++++++++++++++++++++++++++----
 drivers/net/ice/base/ice_switch.c | 26 ++++++++++++++++++++
 drivers/net/ice/base/ice_switch.h |  8 ++++++
 3 files changed, 81 insertions(+), 5 deletions(-)

diff --git a/drivers/net/ice/base/ice_sched.c b/drivers/net/ice/base/ice_sched.c
index 237bf7350..ab6097237 100644
--- a/drivers/net/ice/base/ice_sched.c
+++ b/drivers/net/ice/base/ice_sched.c
@@ -576,6 +576,48 @@ ice_sched_suspend_resume_elems(struct ice_hw *hw, u8 num_nodes, u32 *node_teids,
 }
 
 /**
+ * ice_alloc_lan_q_ctx - allocate LAN queue contexts for the given VSI and TC
+ * @hw: pointer to the HW struct
+ * @vsi_handle: VSI handle
+ * @tc: TC number
+ * @new_numqs: number of queues
+ */
+static enum ice_status
+ice_alloc_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs)
+{
+       struct ice_vsi_ctx *vsi_ctx;
+       struct ice_q_ctx *q_ctx;
+
+       vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
+       if (!vsi_ctx)
+               return ICE_ERR_PARAM;
+       /* allocate LAN queue contexts */
+       if (!vsi_ctx->lan_q_ctx[tc]) {
+               vsi_ctx->lan_q_ctx[tc] = (struct ice_q_ctx *)
+                       ice_calloc(hw, new_numqs, sizeof(*q_ctx));
+               if (!vsi_ctx->lan_q_ctx[tc])
+                       return ICE_ERR_NO_MEMORY;
+               vsi_ctx->num_lan_q_entries[tc] = new_numqs;
+               return ICE_SUCCESS;
+       }
+       /* number of queues is increased, update the queue contexts */
+       if (new_numqs > vsi_ctx->num_lan_q_entries[tc]) {
+               u16 prev_num = vsi_ctx->num_lan_q_entries[tc];
+
+               q_ctx = (struct ice_q_ctx *)
+                       ice_calloc(hw, new_numqs, sizeof(*q_ctx));
+               if (!q_ctx)
+                       return ICE_ERR_NO_MEMORY;
+               ice_memcpy(q_ctx, vsi_ctx->lan_q_ctx[tc],
+                          prev_num * sizeof(*q_ctx), ICE_DMA_TO_NONDMA);
+               ice_free(hw, vsi_ctx->lan_q_ctx[tc]);
+               vsi_ctx->lan_q_ctx[tc] = q_ctx;
+               vsi_ctx->num_lan_q_entries[tc] = new_numqs;
+       }
+       return ICE_SUCCESS;
+}
+
+/**
  * ice_aq_rl_profile - performs a rate limiting task
  * @hw: pointer to the HW struct
  * @opcode:opcode for add, query, or remove profile(s)
@@ -1717,14 +1759,14 @@ ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
        if (!vsi_ctx)
                return ICE_ERR_PARAM;
 
-       if (owner == ICE_SCHED_NODE_OWNER_LAN)
-               prev_numqs = vsi_ctx->sched.max_lanq[tc];
-       else
-               return ICE_ERR_PARAM;
-
+       prev_numqs = vsi_ctx->sched.max_lanq[tc];
        /* num queues are not changed or less than the previous number */
        if (new_numqs <= prev_numqs)
                return status;
+       status = ice_alloc_lan_q_ctx(hw, vsi_handle, tc, new_numqs);
+       if (status)
+               return status;
+
        if (new_numqs)
                ice_sched_calc_vsi_child_nodes(hw, new_numqs, new_num_nodes);
        /* Keep the max number of queue configuration all the time. Update the
diff --git a/drivers/net/ice/base/ice_switch.c b/drivers/net/ice/base/ice_switch.c
index 34637c1be..4b53636b4 100644
--- a/drivers/net/ice/base/ice_switch.c
+++ b/drivers/net/ice/base/ice_switch.c
@@ -3,6 +3,8 @@
  */
 
 #include "ice_switch.h"
+#include "ice_flex_type.h"
+#include "ice_flow.h"
 
 
 #define ICE_ETH_DA_OFFSET              0
@@ -447,6 +449,27 @@ ice_save_vsi_ctx(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi)
 }
 
 /**
+ * ice_clear_vsi_q_ctx - clear VSI queue contexts for all TCs
+ * @hw: pointer to the HW struct
+ * @vsi_handle: VSI handle
+ */
+static void ice_clear_vsi_q_ctx(struct ice_hw *hw, u16 vsi_handle)
+{
+       struct ice_vsi_ctx *vsi;
+       u8 i;
+
+       vsi = ice_get_vsi_ctx(hw, vsi_handle);
+       if (!vsi)
+               return;
+       ice_for_each_traffic_class(i) {
+               if (vsi->lan_q_ctx[i]) {
+                       ice_free(hw, vsi->lan_q_ctx[i]);
+                       vsi->lan_q_ctx[i] = NULL;
+               }
+       }
+}
+
+/**
  * ice_clear_vsi_ctx - clear the VSI context entry
  * @hw: pointer to the HW struct
  * @vsi_handle: VSI handle
@@ -459,6 +482,9 @@ static void ice_clear_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
 
        vsi = ice_get_vsi_ctx(hw, vsi_handle);
        if (vsi) {
+               if (!LIST_EMPTY(&vsi->rss_list_head))
+                       ice_rem_all_rss_vsi_ctx(hw, vsi_handle);
+               ice_clear_vsi_q_ctx(hw, vsi_handle);
                ice_destroy_lock(&vsi->rss_locks);
                ice_free(hw, vsi);
                hw->vsi_ctx[vsi_handle] = NULL;
diff --git a/drivers/net/ice/base/ice_switch.h b/drivers/net/ice/base/ice_switch.h
index ebcfaa120..c6d31475c 100644
--- a/drivers/net/ice/base/ice_switch.h
+++ b/drivers/net/ice/base/ice_switch.h
@@ -15,6 +15,12 @@
 
 
 #define ICE_VSI_INVAL_ID 0xFFFF
+#define ICE_INVAL_Q_HANDLE 0xFFFF
+
+/* VSI queue context structure */
+struct ice_q_ctx {
+       u16  q_handle;
+};
 
 /* VSI context structure for add/get/update/free operations */
 struct ice_vsi_ctx {
@@ -25,6 +31,8 @@ struct ice_vsi_ctx {
        struct ice_aqc_vsi_props info;
        struct ice_sched_vsi_info sched;
        u8 alloc_from_pool;
+       u16 num_lan_q_entries[ICE_MAX_TRAFFIC_CLASS];
+       struct ice_q_ctx *lan_q_ctx[ICE_MAX_TRAFFIC_CLASS];
        struct ice_lock rss_locks;      /* protect rss config in VSI ctx */
        struct LIST_HEAD_TYPE rss_list_head;
 };
-- 
2.13.6
