From: Dillon Varone <dillon.var...@amd.com>

[WHY&HOW]
When command submission is blocked by a full mailbox, only wait for
enough space to free to submit the command, instead of waiting for idle.

Reviewed-by: Nicholas Kazlauskas <nicholas.kazlaus...@amd.com>
Signed-off-by: Dillon Varone <dillon.var...@amd.com>
Signed-off-by: Ray Wu <ray...@amd.com>
---
 drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c  |  2 +-
 drivers/gpu/drm/amd/display/dmub/dmub_srv.h   | 19 +++++
 .../gpu/drm/amd/display/dmub/src/dmub_srv.c   | 72 ++++++++++++++-----
 3 files changed, 76 insertions(+), 17 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c 
b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
index a3fbb9f5b4a6..6115b5364394 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
+++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
@@ -207,7 +207,7 @@ static bool dc_dmub_srv_fb_cmd_list_queue_execute(struct 
dc_dmub_srv *dc_dmub_sr
                                return false;
 
                        do {
-                               status = dmub_srv_wait_for_idle(dmub, 100000);
+                               status = dmub_srv_wait_for_inbox_free(dmub, 
100000, count - i);
                        } while (dc_dmub_srv->ctx->dc->debug.disable_timeout && 
status != DMUB_STATUS_OK);
 
                        /* Requeue the command. */
diff --git a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h 
b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
index 440a426b81c1..e759ce6ca700 100644
--- a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
+++ b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
@@ -1034,4 +1034,23 @@ void dmub_srv_cmd_get_response(struct dmub_srv *dmub,
  */
 enum dmub_status dmub_srv_sync_inboxes(struct dmub_srv *dmub);
 
+/**
+ * dmub_srv_wait_for_inbox_free() - Waits for space in the DMUB inbox to free 
up
+ * @dmub: the dmub service
+ * @timeout_us: the maximum number of microseconds to wait
+ * @num_free_required: number of free entries required
+ *
+ * Waits until at least the specified number of entries in the DMUB inbox
+ * are free. The maximum wait time is given in microseconds to prevent
+ * spinning forever.
+ *
+ * Return:
+ *   DMUB_STATUS_OK - success
+ *   DMUB_STATUS_TIMEOUT - wait for buffer to flush timed out
+ *   DMUB_STATUS_INVALID - unspecified error
+ */
+enum dmub_status dmub_srv_wait_for_inbox_free(struct dmub_srv *dmub,
+               uint32_t timeout_us,
+               uint32_t num_free_required);
+
 #endif /* _DMUB_SRV_H_ */
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c 
b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
index 07bb1d4c4287..c917a70b3c19 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
@@ -978,32 +978,45 @@ enum dmub_status dmub_srv_wait_for_pending(struct 
dmub_srv *dmub,
        return DMUB_STATUS_TIMEOUT;
 }
 
+static enum dmub_status dmub_srv_update_inbox_status(struct dmub_srv *dmub)
+{
+       uint32_t rptr;
+
+       /* update inbox1 state */
+               rptr = dmub->hw_funcs.get_inbox1_rptr(dmub);
+
+       if (rptr > dmub->inbox1.rb.capacity)
+               return DMUB_STATUS_HW_FAILURE;
+
+       if (dmub->inbox1.rb.rptr > rptr) {
+               /* rb wrapped */
+               dmub->inbox1.num_reported += (rptr + dmub->inbox1.rb.capacity - 
dmub->inbox1.rb.rptr) / DMUB_RB_CMD_SIZE;
+       } else {
+               dmub->inbox1.num_reported += (rptr - dmub->inbox1.rb.rptr) / 
DMUB_RB_CMD_SIZE;
+       }
+       dmub->inbox1.rb.rptr = rptr;
+
+       /* update reg_inbox0 */
+       dmub_srv_update_reg_inbox0_status(dmub);
+
+       return DMUB_STATUS_OK;
+}
+
 enum dmub_status dmub_srv_wait_for_idle(struct dmub_srv *dmub,
                                        uint32_t timeout_us)
 {
-       uint32_t i, rptr;
+       enum dmub_status status;
+       uint32_t i;
        const uint32_t polling_interval_us = 1;
 
        if (!dmub->hw_init)
                return DMUB_STATUS_INVALID;
 
        for (i = 0; i < timeout_us; i += polling_interval_us) {
-               /* update inbox1 state */
-                       rptr = dmub->hw_funcs.get_inbox1_rptr(dmub);
+               status = dmub_srv_update_inbox_status(dmub);
 
-               if (rptr > dmub->inbox1.rb.capacity)
-                       return DMUB_STATUS_HW_FAILURE;
-
-               if (dmub->inbox1.rb.rptr > rptr) {
-                       /* rb wrapped */
-                       dmub->inbox1.num_reported += (rptr + 
dmub->inbox1.rb.capacity - dmub->inbox1.rb.rptr) / DMUB_RB_CMD_SIZE;
-               } else {
-                       dmub->inbox1.num_reported += (rptr - 
dmub->inbox1.rb.rptr) / DMUB_RB_CMD_SIZE;
-               }
-               dmub->inbox1.rb.rptr = rptr;
-
-               /* update reg_inbox0 */
-               dmub_srv_update_reg_inbox0_status(dmub);
+               if (status != DMUB_STATUS_OK)
+                       return status;
 
                /* check for idle */
                if (dmub_rb_empty(&dmub->inbox1.rb) && 
!dmub->reg_inbox0.is_pending)
@@ -1313,3 +1326,30 @@ enum dmub_status dmub_srv_sync_inboxes(struct dmub_srv 
*dmub)
 
        return DMUB_STATUS_OK;
 }
+
+enum dmub_status dmub_srv_wait_for_inbox_free(struct dmub_srv *dmub,
+               uint32_t timeout_us,
+               uint32_t num_free_required)
+{
+       enum dmub_status status;
+       uint32_t i;
+       const uint32_t polling_interval_us = 1;
+
+       if (!dmub->hw_init)
+               return DMUB_STATUS_INVALID;
+
+       for (i = 0; i < timeout_us; i += polling_interval_us) {
+               status = dmub_srv_update_inbox_status(dmub);
+
+               if (status != DMUB_STATUS_OK)
+                       return status;
+
+               /* check for space in inbox1 */
+               if (dmub_rb_num_free(&dmub->inbox1.rb) >= num_free_required)
+                       return DMUB_STATUS_OK;
+
+               udelay(polling_interval_us);
+       }
+
+       return DMUB_STATUS_TIMEOUT;
+}
-- 
2.43.0

Reply via email to