Fix a few comments and add a new one describing a SW channel
implementation limitation

Signed-off-by: Rasesh Mody <rasesh.m...@cavium.com>
---
 drivers/net/qede/base/ecore_dcbx.c  |    2 +-
 drivers/net/qede/base/ecore_mcp.c   |    2 +-
 drivers/net/qede/base/ecore_sriov.h |    2 +-
 drivers/net/qede/base/ecore_vf.c    |    8 ++++++++
 4 files changed, 11 insertions(+), 3 deletions(-)

diff --git a/drivers/net/qede/base/ecore_dcbx.c b/drivers/net/qede/base/ecore_dcbx.c
index 201b7d5..a37b7c6 100644
--- a/drivers/net/qede/base/ecore_dcbx.c
+++ b/drivers/net/qede/base/ecore_dcbx.c
@@ -228,7 +228,7 @@ u8 ecore_dcbx_get_dscp_value(struct ecore_hwfn *p_hwfn, u8 pri)
        return true;
 }
 
-/*  Parse app TLV's to update TC information in hw_info structure for
+/* Parse app TLV's to update TC information in hw_info structure for
  * reconfiguring QM. Get protocol specific data for PF update ramrod command.
  */
 static enum _ecore_status_t
diff --git a/drivers/net/qede/base/ecore_mcp.c b/drivers/net/qede/base/ecore_mcp.c
index f09462b..50f73e5 100644
--- a/drivers/net/qede/base/ecore_mcp.c
+++ b/drivers/net/qede/base/ecore_mcp.c
@@ -1339,7 +1339,7 @@ static void ecore_mcp_handle_link_change(struct ecore_hwfn *p_hwfn,
        __ecore_configure_pf_max_bandwidth(p_hwfn, p_ptt,
                                           p_link, max_bw);
 
-       /* Mintz bandwidth configuration */
+       /* Min bandwidth configuration */
        __ecore_configure_pf_min_bandwidth(p_hwfn, p_ptt,
                                           p_link, min_bw);
        ecore_configure_vp_wfq_on_link_change(p_hwfn->p_dev, p_ptt,
diff --git a/drivers/net/qede/base/ecore_sriov.h b/drivers/net/qede/base/ecore_sriov.h
index 0e83f0f..850b105 100644
--- a/drivers/net/qede/base/ecore_sriov.h
+++ b/drivers/net/qede/base/ecore_sriov.h
@@ -264,7 +264,7 @@ void ecore_dp_tlv_list(struct ecore_hwfn *p_hwfn,
  * @param p_hwfn
  * @param disabled_vfs - bitmask of all VFs on path that were FLRed
  *
- * @return 1 iff one of the PF's vfs got FLRed. 0 otherwise.
+ * @return true iff one of the PF's vfs got FLRed. false otherwise.
  */
 bool ecore_iov_mark_vf_flr(struct ecore_hwfn *p_hwfn,
                           u32 *disabled_vfs);
diff --git a/drivers/net/qede/base/ecore_vf.c b/drivers/net/qede/base/ecore_vf.c
index 9ad986c..c04fb09 100644
--- a/drivers/net/qede/base/ecore_vf.c
+++ b/drivers/net/qede/base/ecore_vf.c
@@ -65,6 +65,14 @@ static void ecore_vf_pf_req_end(struct ecore_hwfn *p_hwfn,
        OSAL_MUTEX_RELEASE(&p_hwfn->vf_iov_info->mutex);
 }
 
+#ifdef CONFIG_ECORE_SW_CHANNEL
+/* The Windows SW channel implementation needs to know the exact
+ * response size of any given message. That means we cannot send
+ * additional TLVs to the PF in future messages if the actual
+ * response size differs from the default response size; any such
+ * extension would require a handshake via acquire capabilities.
+ */
+#endif
 static enum _ecore_status_t
 ecore_send_msg2pf(struct ecore_hwfn *p_hwfn,
                  u8 *done, u32 resp_size)
-- 
1.7.10.3
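
For readers unfamiliar with the constraint the new comment describes, below is a
minimal, self-contained C sketch of why a TLV whose response differs from the
default response size has to be gated on a capability negotiated at acquire
time. It is illustrative only: it is not part of this patch, and the names used
(vf_channel, DEFAULT_RESP_SIZE, CAP_EXTENDED_RESP, vf_send_tlv) are hypothetical
rather than part of the ecore API.

/* Illustrative sketch only -- not the ecore API. All names below are
 * hypothetical; they model the response-size limitation described in
 * the new comment in ecore_vf.c.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define DEFAULT_RESP_SIZE 64u          /* size the SW channel always expects */
#define CAP_EXTENDED_RESP (1u << 0)    /* capability negotiated at acquire time */

struct vf_channel {
	unsigned int acquired_caps;    /* capabilities granted by the PF */
};

/* Refuse to send a TLV whose response would not match the size the
 * SW channel pre-programmed, unless the PF explicitly acknowledged the
 * extended-response capability during acquire.
 */
static bool vf_send_tlv(struct vf_channel *ch, size_t resp_size)
{
	if (resp_size != DEFAULT_RESP_SIZE &&
	    !(ch->acquired_caps & CAP_EXTENDED_RESP)) {
		printf("TLV rejected: resp_size %zu != default %u and no capability\n",
		       resp_size, DEFAULT_RESP_SIZE);
		return false;
	}

	printf("TLV sent, expecting %zu response bytes\n", resp_size);
	return true;
}

int main(void)
{
	struct vf_channel ch = { .acquired_caps = 0 };

	vf_send_tlv(&ch, DEFAULT_RESP_SIZE);   /* OK: default-sized response */
	vf_send_tlv(&ch, 128);                 /* rejected: no handshake done */

	ch.acquired_caps |= CAP_EXTENDED_RESP; /* pretend acquire negotiated it */
	vf_send_tlv(&ch, 128);                 /* now allowed */

	return 0;
}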
