From: Ville Syrjälä <ville.syrj...@linux.intel.com>

Collect some of the state used during the plane ddb allocation
into a struct that we can pass around.
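
As a rough sketch of where this could go (the helper below is
hypothetical, not something this patch adds), the iterator carries the
running size/data_rate state that the per-plane proportional split
consumes, so that step could be handed the whole allocation state as
one unit:

  /*
   * Hypothetical helper, for illustration only: carve one plane's
   * proportional share out of the remaining DDB space tracked by
   * the iterator, mirroring the open-coded loop body below.
   */
  static u16 skl_ddb_iter_extra(struct skl_plane_ddb_iter *iter, u64 data_rate)
  {
          u16 extra = min_t(u16, iter->size,
                            DIV64_U64_ROUND_UP(iter->size * data_rate,
                                               iter->data_rate));

          iter->size -= extra;
          iter->data_rate -= data_rate;

          return extra;
  }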

Signed-off-by: Ville Syrjälä <ville.syrj...@linux.intel.com>
---
 drivers/gpu/drm/i915/intel_pm.c | 94 ++++++++++++++++-----------------
 1 file changed, 47 insertions(+), 47 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 2b14f23759ec..d951dab840f9 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -4801,6 +4801,13 @@ skl_plane_wm_level(const struct intel_crtc_state *crtc_state,
        return &wm->wm[level];
 }
 
+struct skl_plane_ddb_iter {
+       u64 data_rate;
+       u16 total[I915_MAX_PLANES];
+       u16 uv_total[I915_MAX_PLANES];
+       u16 start, size;
+};
+
 static int
 skl_allocate_pipe_ddb(struct intel_atomic_state *state,
                      struct intel_crtc *crtc)
@@ -4809,10 +4816,7 @@ skl_allocate_pipe_ddb(struct intel_atomic_state *state,
                intel_atomic_get_new_crtc_state(state, crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        struct skl_ddb_entry *alloc = &crtc_state->wm.skl.ddb;
-       u16 alloc_size, start = 0;
-       u16 total[I915_MAX_PLANES] = {};
-       u16 uv_total[I915_MAX_PLANES] = {};
-       u64 total_data_rate;
+       struct skl_plane_ddb_iter iter = {};
        enum plane_id plane_id;
        int num_active;
        u32 blocks;
@@ -4854,28 +4858,26 @@ skl_allocate_pipe_ddb(struct intel_atomic_state *state,
        }
 
        if (INTEL_GEN(dev_priv) >= 11)
-               total_data_rate =
-                       icl_get_total_relative_data_rate(state, crtc);
+               iter.data_rate = icl_get_total_relative_data_rate(state, crtc);
        else
-               total_data_rate =
-                       skl_get_total_relative_data_rate(state, crtc);
+               iter.data_rate = skl_get_total_relative_data_rate(state, crtc);
 
        ret = skl_ddb_get_pipe_allocation_limits(dev_priv, crtc_state,
                                                 alloc, &num_active);
        if (ret)
                return ret;
 
-       alloc_size = skl_ddb_entry_size(alloc);
-       if (alloc_size == 0)
+       iter.size = skl_ddb_entry_size(alloc);
+       if (iter.size == 0)
                return 0;
 
        /* Allocate fixed number of blocks for cursor. */
-       total[PLANE_CURSOR] = skl_cursor_allocation(crtc_state, num_active);
-       alloc_size -= total[PLANE_CURSOR];
+       iter.total[PLANE_CURSOR] = skl_cursor_allocation(crtc_state, num_active);
+       iter.size -= iter.total[PLANE_CURSOR];
        skl_ddb_entry_init(&crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR],
-                          alloc->end - total[PLANE_CURSOR], alloc->end);
+                          alloc->end - iter.total[PLANE_CURSOR], alloc->end);
 
-       if (total_data_rate == 0)
+       if (iter.data_rate == 0)
                return 0;
 
        /*
@@ -4889,7 +4891,7 @@ skl_allocate_pipe_ddb(struct intel_atomic_state *state,
                                &crtc_state->wm.skl.optimal.planes[plane_id];
 
                        if (plane_id == PLANE_CURSOR) {
-                               if (wm->wm[level].min_ddb_alloc > total[PLANE_CURSOR]) {
+                               if (wm->wm[level].min_ddb_alloc > iter.total[PLANE_CURSOR]) {
                                        drm_WARN_ON(&dev_priv->drm,
                                                    wm->wm[level].min_ddb_alloc != U16_MAX);
                                        blocks = U32_MAX;
@@ -4902,8 +4904,8 @@ skl_allocate_pipe_ddb(struct intel_atomic_state *state,
                        blocks += wm->uv_wm[level].min_ddb_alloc;
                }
 
-               if (blocks <= alloc_size) {
-                       alloc_size -= blocks;
+               if (blocks <= iter.size) {
+                       iter.size -= blocks;
                        break;
                }
        }
@@ -4912,7 +4914,7 @@ skl_allocate_pipe_ddb(struct intel_atomic_state *state,
                drm_dbg_kms(&dev_priv->drm,
                            "Requested display configuration exceeds system DDB 
limitations");
                drm_dbg_kms(&dev_priv->drm, "minimum required %d/%d\n",
-                           blocks, alloc_size);
+                           blocks, iter.size);
                return -EINVAL;
        }
 
@@ -4924,7 +4926,7 @@ skl_allocate_pipe_ddb(struct intel_atomic_state *state,
        for_each_plane_id_on_crtc(crtc, plane_id) {
                const struct skl_plane_wm *wm =
                        &crtc_state->wm.skl.optimal.planes[plane_id];
-               u64 rate;
+               u64 data_rate;
                u16 extra;
 
                if (plane_id == PLANE_CURSOR)
@@ -4934,32 +4936,30 @@ skl_allocate_pipe_ddb(struct intel_atomic_state *state,
                 * We've accounted for all active planes; remaining planes are
                 * all disabled.
                 */
-               if (total_data_rate == 0)
+               if (iter.data_rate == 0)
                        break;
 
-               rate = crtc_state->plane_data_rate[plane_id];
-               extra = min_t(u16, alloc_size,
-                             DIV64_U64_ROUND_UP(alloc_size * rate,
-                                                total_data_rate));
-               total[plane_id] = wm->wm[level].min_ddb_alloc + extra;
-               alloc_size -= extra;
-               total_data_rate -= rate;
+               data_rate = crtc_state->plane_data_rate[plane_id];
+               extra = min_t(u16, iter.size,
+                             DIV64_U64_ROUND_UP(iter.size * data_rate, iter.data_rate));
+               iter.total[plane_id] = wm->wm[level].min_ddb_alloc + extra;
+               iter.size -= extra;
+               iter.data_rate -= data_rate;
 
-               if (total_data_rate == 0)
+               if (iter.data_rate == 0)
                        break;
 
-               rate = crtc_state->uv_plane_data_rate[plane_id];
-               extra = min_t(u16, alloc_size,
-                             DIV64_U64_ROUND_UP(alloc_size * rate,
-                                                total_data_rate));
-               uv_total[plane_id] = wm->uv_wm[level].min_ddb_alloc + extra;
-               alloc_size -= extra;
-               total_data_rate -= rate;
+               data_rate = crtc_state->uv_plane_data_rate[plane_id];
+               extra = min_t(u16, iter.size,
+                             DIV64_U64_ROUND_UP(iter.size * data_rate, iter.data_rate));
+               iter.uv_total[plane_id] = wm->uv_wm[level].min_ddb_alloc + extra;
+               iter.size -= extra;
+               iter.data_rate -= data_rate;
        }
-       drm_WARN_ON(&dev_priv->drm, alloc_size != 0 || total_data_rate != 0);
+       drm_WARN_ON(&dev_priv->drm, iter.size != 0 || iter.data_rate != 0);
 
        /* Set the actual DDB start/end points for each plane */
-       start = alloc->start;
+       iter.start = alloc->start;
        for_each_plane_id_on_crtc(crtc, plane_id) {
                struct skl_ddb_entry *plane_alloc =
                        &crtc_state->wm.skl.plane_ddb_y[plane_id];
@@ -4971,16 +4971,16 @@ skl_allocate_pipe_ddb(struct intel_atomic_state *state,
 
                /* Gen11+ uses a separate plane for UV watermarks */
                drm_WARN_ON(&dev_priv->drm,
-                           INTEL_GEN(dev_priv) >= 11 && uv_total[plane_id]);
+                           INTEL_GEN(dev_priv) >= 11 && iter.uv_total[plane_id]);
 
                /* Leave disabled planes at (0,0) */
-               if (total[plane_id])
-                       start = skl_ddb_entry_init(plane_alloc, start,
-                                                  start + total[plane_id]);
+               if (iter.total[plane_id])
+                       iter.start = skl_ddb_entry_init(plane_alloc, iter.start,
+                                                       iter.start + iter.total[plane_id]);
 
-               if (uv_total[plane_id])
-                       start = skl_ddb_entry_init(uv_plane_alloc, start,
-                                                  start + uv_total[plane_id]);
+               if (iter.uv_total[plane_id])
+                       iter.start = skl_ddb_entry_init(uv_plane_alloc, iter.start,
+                                                       iter.start + iter.uv_total[plane_id]);
        }
 
        /*
@@ -5006,8 +5006,8 @@ skl_allocate_pipe_ddb(struct intel_atomic_state *state,
                         *  planes must be enabled before the level will be used."
                         * So this is actually safe to do.
                         */
-                       if (wm->wm[level].min_ddb_alloc > total[plane_id] ||
-                           wm->uv_wm[level].min_ddb_alloc > uv_total[plane_id])
+                       if (wm->wm[level].min_ddb_alloc > iter.total[plane_id] ||
+                           wm->uv_wm[level].min_ddb_alloc > iter.uv_total[plane_id])
                                memset(&wm->wm[level], 0, sizeof(wm->wm[level]));
 
                        /*
@@ -5031,7 +5031,7 @@ skl_allocate_pipe_ddb(struct intel_atomic_state *state,
                struct skl_plane_wm *wm =
                        &crtc_state->wm.skl.optimal.planes[plane_id];
 
-               if (wm->trans_wm.plane_res_b >= total[plane_id])
+               if (wm->trans_wm.plane_res_b >= iter.total[plane_id])
                        memset(&wm->trans_wm, 0, sizeof(wm->trans_wm));
        }
 
-- 
2.26.2
