drivers/gpu/drm/i915/display/skl_watermark.c | 46 +++++++++-----------
3 files changed, 62 insertions(+), 25 deletions(-)
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index c1a3a95c65f0..62ec95a75154 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -8328,3 +8328,36 @@ bool intel_scanout_needs_vtd_wa(struct intel_display *display)
 
 	return IS_DISPLAY_VER(display, 6, 11) && i915_vtd_active(i915);
 }
+
+int intel_display_scaler_prefill_latency(int num_scaler_users, u64 hscale, u64 vscale,
+					 int chroma_downscaling_factor,
+					 int cdclk_prefill_adjustment,
+					 int linetime)
+{
+	int scaler_prefill_latency;
+
+	scaler_prefill_latency = 4 * linetime +
+		DIV_ROUND_UP_ULL((4 * linetime * hscale * vscale *
+				  chroma_downscaling_factor), 1000000);
+
+	scaler_prefill_latency *= cdclk_prefill_adjustment;
+
+	return scaler_prefill_latency;
+}
+
+int intel_display_dsc_prefill_latency(int num_scaler_users, u64 *hscale, u64 *vscale,
+				      int chroma_downscaling_factor,
+				      int cdclk_prefill_adjustment,
+				      int linetime)
+{
+	int dsc_prefill_latency;
+
+	dsc_prefill_latency = DIV_ROUND_UP(15 * linetime * chroma_downscaling_factor, 10);
+
+	for (int i = 0; i < num_scaler_users; i++)
+		dsc_prefill_latency = DIV_ROUND_UP_ULL(dsc_prefill_latency * hscale[i] * vscale[i],
+						       1000000);
+	dsc_prefill_latency *= cdclk_prefill_adjustment;
+
+	return dsc_prefill_latency;
+}
diff --git a/drivers/gpu/drm/i915/display/intel_display.h b/drivers/gpu/drm/i915/display/intel_display.h
index 37e2ab301a80..8d094b0a8c6b 100644
--- a/drivers/gpu/drm/i915/display/intel_display.h
+++ b/drivers/gpu/drm/i915/display/intel_display.h
@@ -559,5 +559,13 @@ bool assert_port_valid(struct intel_display *display, enum port port);
 bool intel_scanout_needs_vtd_wa(struct intel_display *display);
 int intel_crtc_num_joined_pipes(const struct intel_crtc_state *crtc_state);
+int intel_display_scaler_prefill_latency(int num_scaler_users, u64 hscale, u64 vscale,
+					 int chroma_downscaling_factor,
+					 int cdclk_prefill_adjustment,
+					 int linetime);
+int intel_display_dsc_prefill_latency(int num_scaler_users, u64 *hscale, u64 *vscale,
+				      int chroma_downscaling_factor,
+				      int cdclk_prefill_adjustment,
+				      int linetime);
 
 #endif
diff --git a/drivers/gpu/drm/i915/display/skl_watermark.c b/drivers/gpu/drm/i915/display/skl_watermark.c
index 97b42bbf5642..f0213785e9fc 100644
--- a/drivers/gpu/drm/i915/display/skl_watermark.c
+++ b/drivers/gpu/drm/i915/display/skl_watermark.c
@@ -2179,11 +2179,12 @@ cdclk_prefill_adjustment(const struct intel_crtc_state *crtc_state)
 static int
 dsc_prefill_latency(const struct intel_crtc_state *crtc_state, int linetime)
 {
+	const struct intel_crtc_scaler_state *scaler_state = &crtc_state->scaler_state;
+	int chroma_downscaling_factor = skl_scaler_chroma_downscale_factor(crtc_state);
 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
-	const struct intel_crtc_scaler_state *scaler_state =
-		&crtc_state->scaler_state;
 	int num_scaler_users = hweight32(scaler_state->scaler_users);
-	int chroma_downscaling_factor = skl_scaler_chroma_downscale_factor(crtc_state);
+	u64 hscale_k[ARRAY_SIZE(scaler_state->scalers)];
+	u64 vscale_k[ARRAY_SIZE(scaler_state->scalers)];
 	u32 dsc_prefill_latency = 0;
 
 	if (!crtc_state->dsc.compression_enable ||
@@ -2191,18 +2192,16 @@ dsc_prefill_latency(const struct intel_crtc_state *crtc_state, int linetime)
 	    num_scaler_users > crtc->num_scalers)
 		return dsc_prefill_latency;
 
-	dsc_prefill_latency = DIV_ROUND_UP(15 * linetime * chroma_downscaling_factor, 10);
-
 	for (int i = 0; i < num_scaler_users; i++) {
-		u64 hscale_k, vscale_k;
-
-		hscale_k = max(1000, mul_u32_u32(scaler_state->scalers[i].hscale, 1000) >> 16);
-		vscale_k = max(1000, mul_u32_u32(scaler_state->scalers[i].vscale, 1000) >> 16);
-		dsc_prefill_latency = DIV_ROUND_UP_ULL(dsc_prefill_latency * hscale_k * vscale_k,
-						       1000000);
+		hscale_k[i] = max(1000, mul_u32_u32(scaler_state->scalers[i].hscale, 1000) >> 16);
+		vscale_k[i] = max(1000, mul_u32_u32(scaler_state->scalers[i].vscale, 1000) >> 16);
 	}
 
-	dsc_prefill_latency *= cdclk_prefill_adjustment(crtc_state);
+	dsc_prefill_latency =
+		intel_display_dsc_prefill_latency(num_scaler_users, hscale_k, vscale_k,
+						  chroma_downscaling_factor,
+						  cdclk_prefill_adjustment(crtc_state),
+						  linetime);
 
 	return dsc_prefill_latency;
 }
@@ -2210,28 +2209,25 @@ dsc_prefill_latency(const struct intel_crtc_state *crtc_state, int linetime)
 static int
 scaler_prefill_latency(const struct intel_crtc_state *crtc_state, int linetime)
 {
-	const struct intel_crtc_scaler_state *scaler_state =
-		&crtc_state->scaler_state;
+	const struct intel_crtc_scaler_state *scaler_state = &crtc_state->scaler_state;
+	int chroma_downscaling_factor = skl_scaler_chroma_downscale_factor(crtc_state);
 	int num_scaler_users = hweight32(scaler_state->scaler_users);
+	u64 hscale_k = 0, vscale_k = 0;
 	int scaler_prefill_latency = 0;
 
 	if (!num_scaler_users)
 		return scaler_prefill_latency;
 
-	scaler_prefill_latency = 4 * linetime;
-
 	if (num_scaler_users > 1) {
-		u64 hscale_k = max(1000, mul_u32_u32(scaler_state->scalers[0].hscale, 1000) >> 16);
-		u64 vscale_k = max(1000, mul_u32_u32(scaler_state->scalers[0].vscale, 1000) >> 16);
-		int chroma_downscaling_factor = skl_scaler_chroma_downscale_factor(crtc_state);
-		int latency;
-
-		latency = DIV_ROUND_UP_ULL((4 * linetime * hscale_k * vscale_k *
-					    chroma_downscaling_factor), 1000000);
-		scaler_prefill_latency += latency;
+		hscale_k = max(1000, mul_u32_u32(scaler_state->scalers[0].hscale, 1000) >> 16);
+		vscale_k = max(1000, mul_u32_u32(scaler_state->scalers[0].vscale, 1000) >> 16);
 	}
 
-	scaler_prefill_latency *= cdclk_prefill_adjustment(crtc_state);
+	scaler_prefill_latency =
+		intel_display_scaler_prefill_latency(num_scaler_users, hscale_k, vscale_k,
+						     chroma_downscaling_factor,
+						     cdclk_prefill_adjustment(crtc_state),
+						     linetime);
 
 	return scaler_prefill_latency;
 }
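
For reference, a minimal standalone sketch (not part of the patch) of the arithmetic the new intel_display_scaler_prefill_latency() helper performs. The names scaler_prefill_latency_example() and div_round_up() and all input values below are made up for illustration; div_round_up() stands in for the kernel's DIV_ROUND_UP_ULL(), and the hscale_k/vscale_k inputs are the 1000-scaled downscale ratios the callers derive from the scaler state. The DSC helper applies the same per-scaler scaling term once per scaler user on top of a DIV_ROUND_UP(15 * linetime * chroma_downscaling_factor, 10) base.

#include <stdio.h>
#include <stdint.h>

/* local stand-in for the kernel's DIV_ROUND_UP_ULL() */
static uint64_t div_round_up(uint64_t n, uint64_t d)
{
	return (n + d - 1) / d;
}

/* mirrors intel_display_scaler_prefill_latency(): 4 linetimes of base cost
 * plus the downscale-weighted term, then the cdclk adjustment */
static int scaler_prefill_latency_example(uint64_t hscale_k, uint64_t vscale_k,
					  int chroma_downscaling_factor,
					  int cdclk_prefill_adjustment,
					  int linetime)
{
	int latency = 4 * linetime +
		div_round_up(4ULL * linetime * hscale_k * vscale_k *
			     chroma_downscaling_factor, 1000000);

	return latency * cdclk_prefill_adjustment;
}

int main(void)
{
	/* hypothetical inputs: 1.5x downscale in both directions (factors are
	 * ratio * 1000), no chroma or cdclk adjustment, linetime of 8 */
	printf("%d\n", scaler_prefill_latency_example(1500, 1500, 1, 1, 8));
	/* prints 104: 4 * 8 + (4 * 8 * 1500 * 1500 * 1) / 1000000 = 32 + 72 */
	return 0;
}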