[PATCH 1/5] drm/amd: Introduce additional IPS debug flags

2024-08-16 Thread sunpeng.li
From: Leo Li 

[Why]

Idle power states (IPS) describe levels of power-gating within DCN. DM
and DC are responsible for ensuring that we are out of IPS before any DCN
programming happens. Any DCN programming while we're in IPS leads to
undefined behavior (mostly hangs).

Because IPS intersects with all display features, the ability to disable
IPS by default while ironing out the known issues is desired. However,
disabling it completely will cause important features such as s0ix entry
to fail.

Therefore, more granular IPS debug flags are desired.

[How]

Extend the dc debug mask bits to include the available list of IPS
debug flags.

All the flags should work as documented, with the exception of
IPS_DISABLE_DYNAMIC. It requires DM changes, which will be made in
later patches.

Signed-off-by: Leo Li 
---
 .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c |  6 ++
 drivers/gpu/drm/amd/include/amd_shared.h  | 59 ++-
 2 files changed, 64 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index dd8353283bda3..a18ecf8607232 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -1864,6 +1864,12 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
 
if (amdgpu_dc_debug_mask & DC_DISABLE_IPS)
init_data.flags.disable_ips = DMUB_IPS_DISABLE_ALL;
+   else if (amdgpu_dc_debug_mask & DC_DISABLE_IPS_DYNAMIC)
+   init_data.flags.disable_ips = DMUB_IPS_DISABLE_DYNAMIC;
+   else if (amdgpu_dc_debug_mask & DC_DISABLE_IPS2_DYNAMIC)
+   init_data.flags.disable_ips = 
DMUB_IPS_RCG_IN_ACTIVE_IPS2_IN_OFF;
+   else if (amdgpu_dc_debug_mask & DC_FORCE_IPS_ENABLE)
+   init_data.flags.disable_ips = DMUB_IPS_ENABLE;
else
init_data.flags.disable_ips = DMUB_IPS_ENABLE;
 
diff --git a/drivers/gpu/drm/amd/include/amd_shared.h 
b/drivers/gpu/drm/amd/include/amd_shared.h
index f5b725f10a7ce..6c12ca954a53e 100644
--- a/drivers/gpu/drm/amd/include/amd_shared.h
+++ b/drivers/gpu/drm/amd/include/amd_shared.h
@@ -61,7 +61,7 @@ enum amd_apu_flags {
 * acquires the list of IP blocks for the GPU in use on initialization.
 * It can then operate on this list to perform standard driver operations
 * such as: init, fini, suspend, resume, etc.
-* 
+*
 *
 * IP block implementations are named using the following convention:
 * _v (E.g.: gfx_v6_0).
@@ -251,19 +251,76 @@ enum DC_FEATURE_MASK {
DC_REPLAY_MASK = (1 << 9), //0x200, disabled by default for dcn < 3.1.4
 };
 
+/**
+ * enum DC_DEBUG_MASK - Bits that are useful for debugging the display driver.
+ */
 enum DC_DEBUG_MASK {
+   /* @DC_DISABLE_PIPE_SPLIT: If set, disable pipe-splitting */
DC_DISABLE_PIPE_SPLIT = 0x1,
+
+   /* @DC_DISABLE_STUTTER: If set, disable memory stutter mode */
DC_DISABLE_STUTTER = 0x2,
+
+   /* @DC_DISABLE_DSC: If set, disable display stream compression */
DC_DISABLE_DSC = 0x4,
+
+   /*
+* @DC_DISABLE_CLOCK_GATING: If set, disable clock gating optimizations
+*/
DC_DISABLE_CLOCK_GATING = 0x8,
+
+   /* @DC_DISABLE_PSR: If set, disable Panel self refresh v1 and PSR-SU */
DC_DISABLE_PSR = 0x10,
+
+   /*
+* @DC_FORCE_SUBVP_MCLK_SWITCH: If set, force mclk switch in subvp, even
+* if mclk switch in vblank is possible
+*/
DC_FORCE_SUBVP_MCLK_SWITCH = 0x20,
+
+   /* @DC_DISABLE_MPO: If set, disable multi-plane offloading */
DC_DISABLE_MPO = 0x40,
+
+   /* @DC_ENABLE_DPIA_TRACE: If set, enable trace logging for DPIA */
DC_ENABLE_DPIA_TRACE = 0x80,
+
+   /*
+* @DC_ENABLE_DML2: If set, force usage of DML2, even if the DCN version
+* does not default to it.
+*/
DC_ENABLE_DML2 = 0x100,
+
+   /* @DC_DISABLE_PSR_SU: If set, disable PSR SU */
DC_DISABLE_PSR_SU = 0x200,
+
+   /* @DC_DISABLE_REPLAY: If set, disable Panel Replay */
DC_DISABLE_REPLAY = 0x400,
+
+   /*
+* @DC_DISABLE_IPS: If set, disable all Idle Power States, all the time.
+* If more than one IPS debug bit is set, the lowest bit takes
+* precedence. For example, if DC_FORCE_IPS_ENABLE and
+* DC_DISABLE_IPS_DYNAMIC are set, then DC_DISABLE_IPS_DYNAMIC takes
+* precedence.
+*/
DC_DISABLE_IPS = 0x800,
+
+   /*
+* @DC_DISABLE_IPS_DYNAMIC: If set, disable all IPS, all the time,
+* *except* when driver goes into suspend.
+*/
+   DC_DISABLE_IPS_DYNAMIC = 0x1000,
+
+   /*
+* @DC_DISABLE_IPS2_DYNAMIC: If set, disable IPS2 (IPS1 allowed) if
+* there is an enabled display. Otherwise, enable all IPS.
+*/
+   DC_DISABLE_IPS2_DYNAMIC = 0x2000,
+
+   /*
+* @DC_FORCE_IPS_ENABLE: If set, force enable all IPS, all the time.
+*/
+   DC

[PATCH 3/5] drm/amd/display: Go through dm for exit_ips_for_hw_access

2024-08-16 Thread sunpeng.li
From: Leo Li 

[Why]

dc_exit_ips_for_hw_access() is equivalent to
dc_allow_idle_optimizations(dc, false), but with an additional check on
whether IPS is supported by the ASIC.

[How]

Let's also pipe it through the dm function introduced by the previous
change.

No functional changes are intended.
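
For reference, the equivalence this relies on is roughly the following (a
minimal sketch, not the exact DC implementation):

static void exit_ips_for_hw_access_sketch(struct dc *dc)
{
        /* dc_exit_ips_for_hw_access() only acts when the ASIC supports IPS */
        if (dc->caps.ips_support)
                dc_allow_idle_optimizations(dc, false);
}

Each call site below therefore becomes an ips_support-guarded call into the
DM wrapper instead.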

Signed-off-by: Leo Li 
---
 .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 31 ---
 1 file changed, 20 insertions(+), 11 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 2efa9f6e23015..c99cff3650f14 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -3092,7 +3092,7 @@ static int dm_resume(void *handle)
struct dc_commit_streams_params commit_params = {};
 
if (dm->dc->caps.ips_support) {
-   dc_dmub_srv_apply_idle_power_optimizations(dm->dc, false);
+   dm_allow_idle_optimizations(dm->dc, false);
}
 
if (amdgpu_in_reset(adev)) {
@@ -3141,7 +3141,8 @@ static int dm_resume(void *handle)
 
commit_params.streams = dc_state->streams;
commit_params.stream_count = dc_state->stream_count;
-   dc_exit_ips_for_hw_access(dm->dc);
+   if (dm->dc->caps.ips_support)
+   dm_allow_idle_optimizations(dm->dc, false);
WARN_ON(!dc_commit_streams(dm->dc, &commit_params));
 
dm_gpureset_commit_state(dm->cached_dc_state, dm);
@@ -3214,7 +3215,8 @@ static int dm_resume(void *handle)
emulated_link_detect(aconnector->dc_link);
} else {
mutex_lock(&dm->dc_lock);
-   dc_exit_ips_for_hw_access(dm->dc);
+   if (dm->dc->caps.ips_support)
+   dm_allow_idle_optimizations(dm->dc, false);
dc_link_detect(aconnector->dc_link, 
DETECT_REASON_RESUMEFROMS3S4);
mutex_unlock(&dm->dc_lock);
}
@@ -3589,7 +3591,8 @@ static void handle_hpd_irq_helper(struct 
amdgpu_dm_connector *aconnector)
drm_kms_helper_connector_hotplug_event(connector);
} else {
mutex_lock(&adev->dm.dc_lock);
-   dc_exit_ips_for_hw_access(dc);
+   if (dc->caps.ips_support)
+   dm_allow_idle_optimizations(dc, false);
ret = dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
mutex_unlock(&adev->dm.dc_lock);
if (ret) {
@@ -3739,7 +3742,8 @@ static void handle_hpd_rx_irq(void *param)
bool ret = false;
 
mutex_lock(&adev->dm.dc_lock);
-   dc_exit_ips_for_hw_access(dc);
+   if (dc->caps.ips_support)
+   dm_allow_idle_optimizations(dc, false);
ret = dc_link_detect(dc_link, DETECT_REASON_HPDRX);
mutex_unlock(&adev->dm.dc_lock);
 
@@ -4946,7 +4950,8 @@ static int amdgpu_dm_initialize_drm_device(struct 
amdgpu_device *adev)
bool ret = false;
 
mutex_lock(&dm->dc_lock);
-   dc_exit_ips_for_hw_access(dm->dc);
+   if (dm->dc->caps.ips_support)
+   dm_allow_idle_optimizations(dm->dc, false);
ret = dc_link_detect(link, DETECT_REASON_BOOT);
mutex_unlock(&dm->dc_lock);
 
@@ -9349,7 +9354,8 @@ static void amdgpu_dm_commit_streams(struct 
drm_atomic_state *state,
 
memset(&position, 0, sizeof(position));
mutex_lock(&dm->dc_lock);
-   dc_exit_ips_for_hw_access(dm->dc);
+   if (dm->dc->caps.ips_support)
+   dm_allow_idle_optimizations(dm->dc, false);

dc_stream_program_cursor_position(dm_old_crtc_state->stream, &position);
mutex_unlock(&dm->dc_lock);
}
@@ -9424,7 +9430,8 @@ static void amdgpu_dm_commit_streams(struct 
drm_atomic_state *state,
 
dm_enable_per_frame_crtc_master_sync(dc_state);
mutex_lock(&dm->dc_lock);
-   dc_exit_ips_for_hw_access(dm->dc);
+   if (dm->dc->caps.ips_support)
+   dm_allow_idle_optimizations(dm->dc, false);
WARN_ON(!dc_commit_streams(dm->dc, &params));
 
/* Allow idle optimization when vblank count is 0 for display off */
@@ -9793,7 +9800,8 @@ static void amdgpu_dm_atomic_commit_tail(struct 
drm_atomic_state *state)
 sizeof(*dummy_updates), dm_plane_layer_index_cmp, NULL);
 
mutex_lock(&dm->dc_lock);
-   dc_exit_ips_for_hw_access(dm->dc);
+   if (dm->dc->caps.ips_support)
+   dm_allow_idle_optimizations(dm->dc, f

[PATCH 2/5] drm/amd/display: Go through dm for allow_idle_optimizations

2024-08-16 Thread sunpeng.li
From: Leo Li 

[Why]

In preparation for enabling IPS debug flags that require DM changes,
a common entry point for allowing DC idle optimizations is needed.

[How]

Create an alias in DM for dc_allow_idle_optimizations(), and change all
existing calls to use dm_allow_idle_optimizations() instead.

No functional changes are intended.
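
The alias is a macro so that __func__ expands at the DM call site. For
example, a call made from dm_suspend():

        dm_allow_idle_optimizations(adev->dm.dc, false);

effectively becomes:

        dm_allow_idle_optimizations_internal(adev->dm.dc, false, "dm_suspend");

so DC's idle-optimization logging still reports the real caller rather than
a common wrapper.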

Signed-off-by: Leo Li 
---
 .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c| 16 
 .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h|  6 ++
 .../drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c   |  8 
 .../drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c|  2 +-
 .../drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c|  2 +-
 5 files changed, 24 insertions(+), 10 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index a18ecf8607232..2efa9f6e23015 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -246,6 +246,14 @@ static void handle_hpd_rx_irq(void *param);
 static bool
 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
 struct drm_crtc_state *new_crtc_state);
+
+void dm_allow_idle_optimizations_internal(struct dc *dc,
+ bool allow,
+ char const *caller_name)
+{
+   dc_allow_idle_optimizations_internal(dc, allow, caller_name);
+}
+
 /*
  * dm_vblank_get_counter
  *
@@ -296,7 +304,7 @@ static int dm_crtc_get_scanoutpos(struct amdgpu_device 
*adev, int crtc,
}
 
if (dc && dc->caps.ips_support && dc->idle_optimizations_allowed)
-   dc_allow_idle_optimizations(dc, false);
+   dm_allow_idle_optimizations(dc, false);
 
/*
 * TODO rework base driver to use values directly.
@@ -2883,7 +2891,7 @@ static int dm_suspend(void *handle)
if (amdgpu_in_reset(adev)) {
mutex_lock(&dm->dc_lock);
 
-   dc_allow_idle_optimizations(adev->dm.dc, false);
+   dm_allow_idle_optimizations(adev->dm.dc, false);
 
dm->cached_dc_state = 
dc_state_create_copy(dm->dc->current_state);
 
@@ -2911,7 +2919,7 @@ static int dm_suspend(void *handle)
hpd_rx_irq_work_suspend(dm);
 
if (adev->dm.dc->caps.ips_support)
-   dc_allow_idle_optimizations(adev->dm.dc, true);
+   dm_allow_idle_optimizations(adev->dm.dc, true);
 
dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
dc_dmub_srv_set_power_state(dm->dc->ctx->dmub_srv, 
DC_ACPI_CM_POWER_STATE_D3);
@@ -9421,7 +9429,7 @@ static void amdgpu_dm_commit_streams(struct 
drm_atomic_state *state,
 
/* Allow idle optimization when vblank count is 0 for display off */
if (dm->active_vblank_irq_count == 0)
-   dc_allow_idle_optimizations(dm->dc, true);
+   dm_allow_idle_optimizations(dm->dc, true);
mutex_unlock(&dm->dc_lock);
 
for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
index 2d7755e2b6c32..3fc3c12b3a4a1 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -1007,4 +1007,10 @@ void *dm_allocate_gpu_mem(struct amdgpu_device *adev,
 
 bool amdgpu_dm_is_headless(struct amdgpu_device *adev);
 
+void dm_allow_idle_optimizations_internal(struct dc *dc,
+ bool allow,
+ char const *caller_name);
+#define dm_allow_idle_optimizations(dc, allow) \
+   dm_allow_idle_optimizations_internal(dc, allow, __func__)
+
 #endif /* __AMDGPU_DM_H__ */
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
index a2cf2c066a76d..5b0d426ad50db 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
@@ -203,7 +203,7 @@ static void amdgpu_dm_idle_worker(struct work_struct *work)
mutex_unlock(&idle_work->dm->dc_lock);
break;
}
-   dc_allow_idle_optimizations(idle_work->dm->dc, false);
+   dm_allow_idle_optimizations(idle_work->dm->dc, false);
 
mutex_unlock(&idle_work->dm->dc_lock);
fsleep(HPD_DETECTION_TIME_uS);
@@ -216,7 +216,7 @@ static void amdgpu_dm_idle_worker(struct work_struct *work)
}
 
if (idle_work->enable)
-   dc_allow_idle_optimizations(idle_work->dm->dc, true);
+   dm_allow_idle_optimizations(idle_work->dm->dc, true);
mutex_unlock(&idle_work->dm->dc_lock);
}
idle_work->dm->idle_workqueue->running = false;
@@ -253,7 +253,7 @@ static void amd

[PATCH 5/5] drm/amd/display: Default to DMUB_IPS_DISABLE_DYNAMIC

2024-08-16 Thread sunpeng.li
From: Leo Li 

[Why]

There are currently known backlight and abm issues when IPS is enabled
on DCN35.

While the issues are being ironed out, let's default to
IPS_DISABLE_DYNAMIC so users will not experience hangs. When the issues
are resolved, this patch should be reverted.

[How]

Set the default IPS config to DMUB_IPS_DISABLE_DYNAMIC

Signed-off-by: Leo Li 
---
 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index f9b5966746c73..b1794ced82172 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -1907,7 +1907,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
else if (amdgpu_dc_debug_mask & DC_FORCE_IPS_ENABLE)
init_data.flags.disable_ips = DMUB_IPS_ENABLE;
else
-   init_data.flags.disable_ips = DMUB_IPS_ENABLE;
+   init_data.flags.disable_ips = DMUB_IPS_DISABLE_DYNAMIC;
 
init_data.flags.disable_ips_in_vpb = 0;
 
-- 
2.46.0



[PATCH 4/5] drm/amd/display: Implement DMUB_IPS_DISABLE_DYNAMIC

2024-08-16 Thread sunpeng.li
From: Leo Li 

[Why]

The IPS_DISABLE_DYNAMIC configuration disables IPS in all cases except
when the driver is put into D3 for s0ix.

[How]

Now that we have a common entry point into dc_allow_idle_optimizations
from DM, implement said configuration there.
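
For clarity, the DM wrappers this implies look roughly like the following
(a sketch; the exact macro names and the suspend_resume_path plumbing are
inferred from the call sites in the amdgpu_dm.c hunk, not confirmed here):

void dm_allow_idle_optimizations_internal(struct dc *dc, bool allow,
                                          bool suspend_resume_path,
                                          char const *caller_name);

/* Non-suspend callers: a no-op when DMUB_IPS_DISABLE_DYNAMIC is in effect */
#define dm_allow_idle_optimizations(dc, allow) \
        dm_allow_idle_optimizations_internal(dc, allow, false, __func__)

/* Suspend/resume callers: still reach DC, so IPS2 (and s0ix) stays possible */
#define dm_allow_idle_optimizations_suspend(dc, allow) \
        dm_allow_idle_optimizations_internal(dc, allow, true, __func__)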

Signed-off-by: Leo Li 
---
 .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 42 +++
 .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h |  5 ++-
 2 files changed, 39 insertions(+), 8 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index c99cff3650f14..f9b5966746c73 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -247,10 +247,38 @@ static bool
 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
 struct drm_crtc_state *new_crtc_state);
 
+/**
+ * dm_allow_idle_optimizations_internal() - Allow or disallow idle state entry
+ * @dc: pointer to display core struct
+ * @allow: If true, DC is allowed to go into idle power states. If false, DC
+ * will immediately exit idle power states
+ * @suspend_resume_path: Set to true if the caller is part of the suspend or
+ * resume path
+ * @caller_name: Function name of the caller - for debugging purposes
+ *
+ * Debug flags are considered in this function, so any DM callers should go
+ * through this rather than call dc/dmcub interfaces directly.
+ */
 void dm_allow_idle_optimizations_internal(struct dc *dc,
  bool allow,
+ bool suspend_resume_path,
  char const *caller_name)
 {
+   /*
+* We can early-return here if IPS support exists, and DISABLE_ALL debug
+* flag is set.
+*
+* We also need special handling for IPS_DISABLE_DYNAMIC in DM to allow
+* IPS only for the suspend/resume call path.
+*
+* The rest of the debug flags are handled in `dc_dmub_srv_notify_idle`,
+* which if IPS is supported, will eventually be called.
+*/
+   if (dc->caps.ips_support &&
+   (dc->config.disable_ips == DMUB_IPS_DISABLE_ALL ||
+   (!suspend_resume_path && dc->config.disable_ips == 
DMUB_IPS_DISABLE_DYNAMIC)))
+   return;
+
dc_allow_idle_optimizations_internal(dc, allow, caller_name);
 }
 
@@ -2891,7 +2919,7 @@ static int dm_suspend(void *handle)
if (amdgpu_in_reset(adev)) {
mutex_lock(&dm->dc_lock);
 
-   dm_allow_idle_optimizations(adev->dm.dc, false);
+   dm_allow_idle_optimizations_suspend(adev->dm.dc, false);
 
dm->cached_dc_state = 
dc_state_create_copy(dm->dc->current_state);
 
@@ -2918,8 +2946,9 @@ static int dm_suspend(void *handle)
 
hpd_rx_irq_work_suspend(dm);
 
+   /* IPS2 entry is required for standby */
if (adev->dm.dc->caps.ips_support)
-   dm_allow_idle_optimizations(adev->dm.dc, true);
+   dm_allow_idle_optimizations_suspend(adev->dm.dc, true);
 
dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
dc_dmub_srv_set_power_state(dm->dc->ctx->dmub_srv, 
DC_ACPI_CM_POWER_STATE_D3);
@@ -3091,9 +3120,8 @@ static int dm_resume(void *handle)
bool need_hotplug = false;
struct dc_commit_streams_params commit_params = {};
 
-   if (dm->dc->caps.ips_support) {
-   dm_allow_idle_optimizations(dm->dc, false);
-   }
+   if (dm->dc->caps.ips_support)
+   dm_allow_idle_optimizations_suspend(dm->dc, false);
 
if (amdgpu_in_reset(adev)) {
dc_state = dm->cached_dc_state;
@@ -3142,7 +3170,7 @@ static int dm_resume(void *handle)
commit_params.streams = dc_state->streams;
commit_params.stream_count = dc_state->stream_count;
if (dm->dc->caps.ips_support)
-   dm_allow_idle_optimizations(dm->dc, false);
+   dm_allow_idle_optimizations_suspend(dm->dc, false);
WARN_ON(!dc_commit_streams(dm->dc, &commit_params));
 
dm_gpureset_commit_state(dm->cached_dc_state, dm);
@@ -3216,7 +3244,7 @@ static int dm_resume(void *handle)
} else {
mutex_lock(&dm->dc_lock);
if (dm->dc->caps.ips_support)
-   dm_allow_idle_optimizations(dm->dc, false);
+   dm_allow_idle_optimizations_suspend(dm->dc, 
false);
dc_link_detect(aconnector->dc_link, 
DETECT_REASON_RESUMEFROMS3S4);
mutex_unlock(&dm->dc_lock);
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
index 3fc3c12b3a4a1..da7283c67b13c 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b

[PATCH v2 1/5] drm/amd: Introduce additional IPS debug flags

2024-08-19 Thread sunpeng.li
From: Leo Li 

[Why]

Idle power states (IPS) describe levels of power-gating within DCN. DM
and DC are responsible for ensuring that we are out of IPS before any DCN
programming happens. Any DCN programming while we're in IPS leads to
undefined behavior (mostly hangs).

Because IPS intersects with all display features, the ability to disable
IPS by default while ironing out the known issues is desired. However,
disabling it completely will cause important features such as s0ix entry
to fail.

Therefore, more granular IPS debug flags are desired.

[How]

Extend the dc debug mask bits to include the available list of IPS
debug flags.

All the flags should work as documented, with the exception of
IPS_DISABLE_DYNAMIC. It requires DM changes, which will be made in
later patches.

v2: enable docs and fix docstring format

Signed-off-by: Leo Li 
---
 Documentation/gpu/amdgpu/driver-core.rst  |  2 +-
 .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c |  6 ++
 drivers/gpu/drm/amd/include/amd_shared.h  | 75 ++-
 3 files changed, 81 insertions(+), 2 deletions(-)

diff --git a/Documentation/gpu/amdgpu/driver-core.rst 
b/Documentation/gpu/amdgpu/driver-core.rst
index 467e6843aef63..32723a925377e 100644
--- a/Documentation/gpu/amdgpu/driver-core.rst
+++ b/Documentation/gpu/amdgpu/driver-core.rst
@@ -179,4 +179,4 @@ IP Blocks
:doc: IP Blocks
 
 .. kernel-doc:: drivers/gpu/drm/amd/include/amd_shared.h
-   :identifiers: amd_ip_block_type amd_ip_funcs
+   :identifiers: amd_ip_block_type amd_ip_funcs DC_DEBUG_MASK
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index dd8353283bda3..a18ecf8607232 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -1864,6 +1864,12 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
 
if (amdgpu_dc_debug_mask & DC_DISABLE_IPS)
init_data.flags.disable_ips = DMUB_IPS_DISABLE_ALL;
+   else if (amdgpu_dc_debug_mask & DC_DISABLE_IPS_DYNAMIC)
+   init_data.flags.disable_ips = DMUB_IPS_DISABLE_DYNAMIC;
+   else if (amdgpu_dc_debug_mask & DC_DISABLE_IPS2_DYNAMIC)
+   init_data.flags.disable_ips = 
DMUB_IPS_RCG_IN_ACTIVE_IPS2_IN_OFF;
+   else if (amdgpu_dc_debug_mask & DC_FORCE_IPS_ENABLE)
+   init_data.flags.disable_ips = DMUB_IPS_ENABLE;
else
init_data.flags.disable_ips = DMUB_IPS_ENABLE;
 
diff --git a/drivers/gpu/drm/amd/include/amd_shared.h 
b/drivers/gpu/drm/amd/include/amd_shared.h
index f5b725f10a7ce..745fd052840dc 100644
--- a/drivers/gpu/drm/amd/include/amd_shared.h
+++ b/drivers/gpu/drm/amd/include/amd_shared.h
@@ -61,7 +61,7 @@ enum amd_apu_flags {
 * acquires the list of IP blocks for the GPU in use on initialization.
 * It can then operate on this list to perform standard driver operations
 * such as: init, fini, suspend, resume, etc.
-* 
+*
 *
 * IP block implementations are named using the following convention:
 * _v (E.g.: gfx_v6_0).
@@ -251,19 +251,92 @@ enum DC_FEATURE_MASK {
DC_REPLAY_MASK = (1 << 9), //0x200, disabled by default for dcn < 3.1.4
 };
 
+/**
+ * enum DC_DEBUG_MASK - Bits that are useful for debugging the Display Core IP
+ */
 enum DC_DEBUG_MASK {
+   /**
+* @DC_DISABLE_PIPE_SPLIT: If set, disable pipe-splitting
+*/
DC_DISABLE_PIPE_SPLIT = 0x1,
+
+   /**
+* @DC_DISABLE_STUTTER: If set, disable memory stutter mode
+*/
DC_DISABLE_STUTTER = 0x2,
+
+   /**
+* @DC_DISABLE_DSC: If set, disable display stream compression
+*/
DC_DISABLE_DSC = 0x4,
+
+   /**
+* @DC_DISABLE_CLOCK_GATING: If set, disable clock gating optimizations
+*/
DC_DISABLE_CLOCK_GATING = 0x8,
+
+   /**
+* @DC_DISABLE_PSR: If set, disable Panel self refresh v1 and PSR-SU
+*/
DC_DISABLE_PSR = 0x10,
+
+   /**
+* @DC_FORCE_SUBVP_MCLK_SWITCH: If set, force mclk switch in subvp, even
+* if mclk switch in vblank is possible
+*/
DC_FORCE_SUBVP_MCLK_SWITCH = 0x20,
+
+   /**
+* @DC_DISABLE_MPO: If set, disable multi-plane offloading
+*/
DC_DISABLE_MPO = 0x40,
+
+   /**
+* @DC_ENABLE_DPIA_TRACE: If set, enable trace logging for DPIA
+*/
DC_ENABLE_DPIA_TRACE = 0x80,
+
+   /**
+* @DC_ENABLE_DML2: If set, force usage of DML2, even if the DCN version
+* does not default to it.
+*/
DC_ENABLE_DML2 = 0x100,
+
+   /**
+* @DC_DISABLE_PSR_SU: If set, disable PSR SU
+*/
DC_DISABLE_PSR_SU = 0x200,
+
+   /**
+* @DC_DISABLE_REPLAY: If set, disable Panel Replay
+*/
DC_DISABLE_REPLAY = 0x400,
+
+   /**
+* @DC_DISABLE_IPS: If set, disable all Idle Power States, all the time.
+* If more than one IPS debug bit is set, the lowest bit t

[PATCH v2 2/5] drm/amd/display: Go through dm for allow_idle_optimizations

2024-08-19 Thread sunpeng.li
From: Leo Li 

[Why]

In preparation for enabling IPS debug flags that require DM changes,
a common entry point for allowing DC idle optimizations is needed.

[How]

Create an alias in DM for dc_allow_idle_optimizations(), and change all
existing calls to use dm_allow_idle_optimizations() instead.

No functional changes are intended.

Signed-off-by: Leo Li 
---
 .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c| 16 
 .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h|  6 ++
 .../drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c   |  8 
 .../drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c|  2 +-
 .../drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c|  2 +-
 5 files changed, 24 insertions(+), 10 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index a18ecf8607232..2efa9f6e23015 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -246,6 +246,14 @@ static void handle_hpd_rx_irq(void *param);
 static bool
 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
 struct drm_crtc_state *new_crtc_state);
+
+void dm_allow_idle_optimizations_internal(struct dc *dc,
+ bool allow,
+ char const *caller_name)
+{
+   dc_allow_idle_optimizations_internal(dc, allow, caller_name);
+}
+
 /*
  * dm_vblank_get_counter
  *
@@ -296,7 +304,7 @@ static int dm_crtc_get_scanoutpos(struct amdgpu_device 
*adev, int crtc,
}
 
if (dc && dc->caps.ips_support && dc->idle_optimizations_allowed)
-   dc_allow_idle_optimizations(dc, false);
+   dm_allow_idle_optimizations(dc, false);
 
/*
 * TODO rework base driver to use values directly.
@@ -2883,7 +2891,7 @@ static int dm_suspend(void *handle)
if (amdgpu_in_reset(adev)) {
mutex_lock(&dm->dc_lock);
 
-   dc_allow_idle_optimizations(adev->dm.dc, false);
+   dm_allow_idle_optimizations(adev->dm.dc, false);
 
dm->cached_dc_state = 
dc_state_create_copy(dm->dc->current_state);
 
@@ -2911,7 +2919,7 @@ static int dm_suspend(void *handle)
hpd_rx_irq_work_suspend(dm);
 
if (adev->dm.dc->caps.ips_support)
-   dc_allow_idle_optimizations(adev->dm.dc, true);
+   dm_allow_idle_optimizations(adev->dm.dc, true);
 
dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
dc_dmub_srv_set_power_state(dm->dc->ctx->dmub_srv, 
DC_ACPI_CM_POWER_STATE_D3);
@@ -9421,7 +9429,7 @@ static void amdgpu_dm_commit_streams(struct 
drm_atomic_state *state,
 
/* Allow idle optimization when vblank count is 0 for display off */
if (dm->active_vblank_irq_count == 0)
-   dc_allow_idle_optimizations(dm->dc, true);
+   dm_allow_idle_optimizations(dm->dc, true);
mutex_unlock(&dm->dc_lock);
 
for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
index 2d7755e2b6c32..3fc3c12b3a4a1 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -1007,4 +1007,10 @@ void *dm_allocate_gpu_mem(struct amdgpu_device *adev,
 
 bool amdgpu_dm_is_headless(struct amdgpu_device *adev);
 
+void dm_allow_idle_optimizations_internal(struct dc *dc,
+ bool allow,
+ char const *caller_name);
+#define dm_allow_idle_optimizations(dc, allow) \
+   dm_allow_idle_optimizations_internal(dc, allow, __func__)
+
 #endif /* __AMDGPU_DM_H__ */
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
index a2cf2c066a76d..5b0d426ad50db 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
@@ -203,7 +203,7 @@ static void amdgpu_dm_idle_worker(struct work_struct *work)
mutex_unlock(&idle_work->dm->dc_lock);
break;
}
-   dc_allow_idle_optimizations(idle_work->dm->dc, false);
+   dm_allow_idle_optimizations(idle_work->dm->dc, false);
 
mutex_unlock(&idle_work->dm->dc_lock);
fsleep(HPD_DETECTION_TIME_uS);
@@ -216,7 +216,7 @@ static void amdgpu_dm_idle_worker(struct work_struct *work)
}
 
if (idle_work->enable)
-   dc_allow_idle_optimizations(idle_work->dm->dc, true);
+   dm_allow_idle_optimizations(idle_work->dm->dc, true);
mutex_unlock(&idle_work->dm->dc_lock);
}
idle_work->dm->idle_workqueue->running = false;
@@ -253,7 +253,7 @@ static void amd

[PATCH v2 3/5] drm/amd/display: Go through dm for exit_ips_for_hw_access

2024-08-19 Thread sunpeng.li
From: Leo Li 

[Why]

dc_exit_ips_for_hw_access() is equivalent to
dc_allow_idle_optimizations(dc, false), but with an additional check on
whether IPS is supported by the ASIC.

[How]

Let's also pipe it through the dm function introduced by the previous
change.

No functional changes are intended.

Signed-off-by: Leo Li 
---
 .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 31 ---
 1 file changed, 20 insertions(+), 11 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 2efa9f6e23015..c99cff3650f14 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -3092,7 +3092,7 @@ static int dm_resume(void *handle)
struct dc_commit_streams_params commit_params = {};
 
if (dm->dc->caps.ips_support) {
-   dc_dmub_srv_apply_idle_power_optimizations(dm->dc, false);
+   dm_allow_idle_optimizations(dm->dc, false);
}
 
if (amdgpu_in_reset(adev)) {
@@ -3141,7 +3141,8 @@ static int dm_resume(void *handle)
 
commit_params.streams = dc_state->streams;
commit_params.stream_count = dc_state->stream_count;
-   dc_exit_ips_for_hw_access(dm->dc);
+   if (dm->dc->caps.ips_support)
+   dm_allow_idle_optimizations(dm->dc, false);
WARN_ON(!dc_commit_streams(dm->dc, &commit_params));
 
dm_gpureset_commit_state(dm->cached_dc_state, dm);
@@ -3214,7 +3215,8 @@ static int dm_resume(void *handle)
emulated_link_detect(aconnector->dc_link);
} else {
mutex_lock(&dm->dc_lock);
-   dc_exit_ips_for_hw_access(dm->dc);
+   if (dm->dc->caps.ips_support)
+   dm_allow_idle_optimizations(dm->dc, false);
dc_link_detect(aconnector->dc_link, 
DETECT_REASON_RESUMEFROMS3S4);
mutex_unlock(&dm->dc_lock);
}
@@ -3589,7 +3591,8 @@ static void handle_hpd_irq_helper(struct 
amdgpu_dm_connector *aconnector)
drm_kms_helper_connector_hotplug_event(connector);
} else {
mutex_lock(&adev->dm.dc_lock);
-   dc_exit_ips_for_hw_access(dc);
+   if (dc->caps.ips_support)
+   dm_allow_idle_optimizations(dc, false);
ret = dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
mutex_unlock(&adev->dm.dc_lock);
if (ret) {
@@ -3739,7 +3742,8 @@ static void handle_hpd_rx_irq(void *param)
bool ret = false;
 
mutex_lock(&adev->dm.dc_lock);
-   dc_exit_ips_for_hw_access(dc);
+   if (dc->caps.ips_support)
+   dm_allow_idle_optimizations(dc, false);
ret = dc_link_detect(dc_link, DETECT_REASON_HPDRX);
mutex_unlock(&adev->dm.dc_lock);
 
@@ -4946,7 +4950,8 @@ static int amdgpu_dm_initialize_drm_device(struct 
amdgpu_device *adev)
bool ret = false;
 
mutex_lock(&dm->dc_lock);
-   dc_exit_ips_for_hw_access(dm->dc);
+   if (dm->dc->caps.ips_support)
+   dm_allow_idle_optimizations(dm->dc, false);
ret = dc_link_detect(link, DETECT_REASON_BOOT);
mutex_unlock(&dm->dc_lock);
 
@@ -9349,7 +9354,8 @@ static void amdgpu_dm_commit_streams(struct 
drm_atomic_state *state,
 
memset(&position, 0, sizeof(position));
mutex_lock(&dm->dc_lock);
-   dc_exit_ips_for_hw_access(dm->dc);
+   if (dm->dc->caps.ips_support)
+   dm_allow_idle_optimizations(dm->dc, false);

dc_stream_program_cursor_position(dm_old_crtc_state->stream, &position);
mutex_unlock(&dm->dc_lock);
}
@@ -9424,7 +9430,8 @@ static void amdgpu_dm_commit_streams(struct 
drm_atomic_state *state,
 
dm_enable_per_frame_crtc_master_sync(dc_state);
mutex_lock(&dm->dc_lock);
-   dc_exit_ips_for_hw_access(dm->dc);
+   if (dm->dc->caps.ips_support)
+   dm_allow_idle_optimizations(dm->dc, false);
WARN_ON(!dc_commit_streams(dm->dc, &params));
 
/* Allow idle optimization when vblank count is 0 for display off */
@@ -9793,7 +9800,8 @@ static void amdgpu_dm_atomic_commit_tail(struct 
drm_atomic_state *state)
 sizeof(*dummy_updates), dm_plane_layer_index_cmp, NULL);
 
mutex_lock(&dm->dc_lock);
-   dc_exit_ips_for_hw_access(dm->dc);
+   if (dm->dc->caps.ips_support)
+   dm_allow_idle_optimizations(dm->dc, f

[PATCH v2 4/5] drm/amd/display: Implement DMUB_IPS_DISABLE_DYNAMIC

2024-08-19 Thread sunpeng.li
From: Leo Li 

[Why]

The IPS_DISABLE_DYNAMIC configuration disables IPS in all cases except
when the driver is put into D3 for s0ix.

[How]

Now that we have a common entry point into dc_allow_idle_optimizations
from DM, implement said configuration there.

Signed-off-by: Leo Li 
---
 .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 42 +++
 .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h |  5 ++-
 2 files changed, 39 insertions(+), 8 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index c99cff3650f14..f9b5966746c73 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -247,10 +247,38 @@ static bool
 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
 struct drm_crtc_state *new_crtc_state);
 
+/**
+ * dm_allow_idle_optimizations_internal() - Allow or disallow idle state entry
+ * @dc: pointer to display core struct
+ * @allow: If true, DC is allowed to go into idle power states. If false, DC
+ * will immediately exit idle power states
+ * @suspend_resume_path: Set to true if the caller is part of the suspend or
+ * resume path
+ * @caller_name: Function name of the caller - for debugging purposes
+ *
+ * Debug flags are considered in this function, so any DM callers should go
+ * through this rather than call dc/dmcub interfaces directly.
+ */
 void dm_allow_idle_optimizations_internal(struct dc *dc,
  bool allow,
+ bool suspend_resume_path,
  char const *caller_name)
 {
+   /*
+* We can early-return here if IPS support exists, and DISABLE_ALL debug
+* flag is set.
+*
+* We also need special handling for IPS_DISABLE_DYNAMIC in DM to allow
+* IPS only for the suspend/resume call path.
+*
+* The rest of the debug flags are handled in `dc_dmub_srv_notify_idle`,
+* which if IPS is supported, will eventually be called.
+*/
+   if (dc->caps.ips_support &&
+   (dc->config.disable_ips == DMUB_IPS_DISABLE_ALL ||
+   (!suspend_resume_path && dc->config.disable_ips == 
DMUB_IPS_DISABLE_DYNAMIC)))
+   return;
+
dc_allow_idle_optimizations_internal(dc, allow, caller_name);
 }
 
@@ -2891,7 +2919,7 @@ static int dm_suspend(void *handle)
if (amdgpu_in_reset(adev)) {
mutex_lock(&dm->dc_lock);
 
-   dm_allow_idle_optimizations(adev->dm.dc, false);
+   dm_allow_idle_optimizations_suspend(adev->dm.dc, false);
 
dm->cached_dc_state = 
dc_state_create_copy(dm->dc->current_state);
 
@@ -2918,8 +2946,9 @@ static int dm_suspend(void *handle)
 
hpd_rx_irq_work_suspend(dm);
 
+   /* IPS2 entry is required for standby */
if (adev->dm.dc->caps.ips_support)
-   dm_allow_idle_optimizations(adev->dm.dc, true);
+   dm_allow_idle_optimizations_suspend(adev->dm.dc, true);
 
dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
dc_dmub_srv_set_power_state(dm->dc->ctx->dmub_srv, 
DC_ACPI_CM_POWER_STATE_D3);
@@ -3091,9 +3120,8 @@ static int dm_resume(void *handle)
bool need_hotplug = false;
struct dc_commit_streams_params commit_params = {};
 
-   if (dm->dc->caps.ips_support) {
-   dm_allow_idle_optimizations(dm->dc, false);
-   }
+   if (dm->dc->caps.ips_support)
+   dm_allow_idle_optimizations_suspend(dm->dc, false);
 
if (amdgpu_in_reset(adev)) {
dc_state = dm->cached_dc_state;
@@ -3142,7 +3170,7 @@ static int dm_resume(void *handle)
commit_params.streams = dc_state->streams;
commit_params.stream_count = dc_state->stream_count;
if (dm->dc->caps.ips_support)
-   dm_allow_idle_optimizations(dm->dc, false);
+   dm_allow_idle_optimizations_suspend(dm->dc, false);
WARN_ON(!dc_commit_streams(dm->dc, &commit_params));
 
dm_gpureset_commit_state(dm->cached_dc_state, dm);
@@ -3216,7 +3244,7 @@ static int dm_resume(void *handle)
} else {
mutex_lock(&dm->dc_lock);
if (dm->dc->caps.ips_support)
-   dm_allow_idle_optimizations(dm->dc, false);
+   dm_allow_idle_optimizations_suspend(dm->dc, 
false);
dc_link_detect(aconnector->dc_link, 
DETECT_REASON_RESUMEFROMS3S4);
mutex_unlock(&dm->dc_lock);
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
index 3fc3c12b3a4a1..da7283c67b13c 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b

[PATCH v2 5/5] drm/amd/display: Default to DMUB_IPS_DISABLE_DYNAMIC

2024-08-19 Thread sunpeng.li
From: Leo Li 

[Why]

There are currently known backlight and abm issues when IPS is enabled
on DCN35.

While the issues are being ironed out, let's default to
IPS_DISABLE_DYNAMIC so users will not experience hangs. When the issues
are resolved, this patch should be reverted.

[How]

Set the default IPS config to DMUB_IPS_DISABLE_DYNAMIC

Signed-off-by: Leo Li 
---
 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index f9b5966746c73..b1794ced82172 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -1907,7 +1907,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
else if (amdgpu_dc_debug_mask & DC_FORCE_IPS_ENABLE)
init_data.flags.disable_ips = DMUB_IPS_ENABLE;
else
-   init_data.flags.disable_ips = DMUB_IPS_ENABLE;
+   init_data.flags.disable_ips = DMUB_IPS_DISABLE_DYNAMIC;
 
init_data.flags.disable_ips_in_vpb = 0;
 
-- 
2.46.0



[PATCH] drm/amd/display: Determine IPS mode by ASIC and PMFW versions

2024-08-27 Thread sunpeng.li
From: Leo Li 

[Why]

DCN IPS interoperates with other system idle power features, such as
Zstates.

On DCN35, there is a known issue where system Z8 + DCN IPS2 causes a
hard hang. We observe this on systems where the SBIOS allows Z8.

Though there is an SBIOS fix, there's no guarantee that users will get it
any time soon, or even install it. A workaround is needed to prevent
this from rearing its head in the wild.

[How]

For DCN35, check the PMFW version to determine whether the SBIOS has the
fix. If not, set IPS1+RCG as the deepest possible state in all cases
except for s0ix and display off (DPMS). Otherwise, enable all IPS.
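
As a rough illustration of the version gate (assuming fw_version packs
major/minor/revision in its low three bytes, which is an inference from the
code comment below rather than something documented here):

static bool dcn35_pmfw_has_z8_fix(u32 fw_version)
{
        /*
         * Compare major.minor only, ignoring the revision byte:
         *   0x005D6204 & 0x00FFFF00 = 0x005D6200 -> below 0x005D6300, apply workaround
         *   0x005D6304 & 0x00FFFF00 = 0x005D6300 -> fix present, IPS2 allowed
         */
        return (fw_version & 0x00FFFF00) >= 0x005D6300;
}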

Signed-off-by: Leo Li 
---
 .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 26 ++-
 1 file changed, 25 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index a18ecf8607232..a2e4973a4f6e3 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -1754,6 +1754,30 @@ static struct dml2_soc_bb 
*dm_dmub_get_vbios_bounding_box(struct amdgpu_device *
return bb;
 }
 
+static enum dmub_ips_disable_type dm_get_default_ips_mode(
+   struct amdgpu_device *adev)
+{
+   /*
+* On DCN35 systems with Z8 enabled, it's possible for IPS2 + Z8 to
+* cause a hard hang. A fix exists for newer PMFW.
+*
+* As a workaround, for non-fixed PMFW, force IPS1+RCG as the deepest
+* IPS state in all cases, except for s0ix and all displays off (DPMS),
+* where IPS2 is allowed.
+*
+* When checking pmfw version, use the major and minor only.
+*/
+   if (amdgpu_ip_version(adev, DCE_HWIP, 0) == IP_VERSION(3, 5, 0) &&
+   (adev->pm.fw_version & 0x00FFFF00) < 0x005D6300)
+   return DMUB_IPS_RCG_IN_ACTIVE_IPS2_IN_OFF;
+
+   if (amdgpu_ip_version(adev, DCE_HWIP, 0) >= IP_VERSION(3, 5, 0))
+   return DMUB_IPS_ENABLE;
+
+   /* ASICs older than DCN35 do not have IPS */
+   return DMUB_IPS_DISABLE_ALL;
+}
+
 static int amdgpu_dm_init(struct amdgpu_device *adev)
 {
struct dc_init_data init_data;
@@ -1871,7 +1895,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
else if (amdgpu_dc_debug_mask & DC_FORCE_IPS_ENABLE)
init_data.flags.disable_ips = DMUB_IPS_ENABLE;
else
-   init_data.flags.disable_ips = DMUB_IPS_ENABLE;
+   init_data.flags.disable_ips = dm_get_default_ips_mode(adev);
 
init_data.flags.disable_ips_in_vpb = 0;
 
-- 
2.46.0



[PATCH] drm/amd/display: Do not reset planes based on crtc zpos_changed

2024-09-06 Thread sunpeng.li
From: Leo Li 

[Why]

drm_normalize_zpos will set crtc_state->zpos_changed to 1 if any of
its assigned planes changes zpos, or is added to or removed from it.

Having amdgpu_dm request a plane reset whenever this is set is too
broad. For example, if only the cursor plane moved from one CRTC to
another, the CRTC's zpos_changed will be set to true, but that does not
mean the underlying primary plane requires a reset.

[How]

Narrow it down so that only the plane that has a change in zpos will
require a reset.

As a future TODO, we can further optimize this by only requiring a reset
on z-order change. Z-order is different from z-pos, since a zpos change
doesn't necessarily mean the z-ordering changed, and DC should only
require a reset if the z-ordering changed.

For example, the following zpos update does not change z-ordering:

Plane A: zpos 2 -> 3
Plane B: zpos 1 -> 2
=> Plane A is still on top of plane B: no reset needed

Whereas this one does change z-ordering:

Plane A: zpos 2 -> 1
Plane B: zpos 1 -> 2
=> Plane A changed from on top, to below plane B: reset needed
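
A sketch of what that TODO could look like (the helper is hypothetical):
compare the relative ordering of two planes before and after the update,
and only require a reset when it actually flips:

static bool dm_plane_order_flipped(const struct drm_plane_state *old_a,
                                   const struct drm_plane_state *new_a,
                                   const struct drm_plane_state *old_b,
                                   const struct drm_plane_state *new_b)
{
        bool was_above = old_a->normalized_zpos > old_b->normalized_zpos;
        bool is_above = new_a->normalized_zpos > new_b->normalized_zpos;

        /* A zpos change that preserves relative order needs no plane reset */
        return was_above != is_above;
}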

Fixes: 38e0c3df6dbd ("drm/amd/display: Move PRIMARY plane zpos higher")
Closes: https://gitlab.freedesktop.org/drm/amd/-/issues/3569
Signed-off-by: Leo Li 
---
 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 43bf8883ddc0f..a3edaf658ae00 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -10579,7 +10579,7 @@ static bool should_reset_plane(struct drm_atomic_state 
*state,
 * TODO: We can likely skip bandwidth validation if the only thing that
* changed about the plane was its z-ordering.
 */
-   if (new_crtc_state->zpos_changed)
+   if (old_plane_state->normalized_zpos != 
new_plane_state->normalized_zpos)
return true;
 
if (drm_atomic_crtc_needs_modeset(new_crtc_state))
-- 
2.46.0



[PATCH] drm/amd/display: Add all planes on CRTC to state for overlay cursor

2024-09-11 Thread sunpeng.li
From: Leo Li 

[Why]

DC has a special commit path for native cursor, which uses the cursor
hardware built into DCN pipes. This update path does not require all
enabled planes to be added to the list of surface updates sent to DC.

This is not the case for overlay cursor; it uses the same path as MPO
commits. This update path requires all enabled planes to be added to the
list of surface updates sent to DC. Otherwise, DC will disable planes
not inside the list.

[How]

If overlay cursor is needed, add all planes on the same CRTC as this
cursor to the atomic state. This is already done for non-cursor planes
(MPO), just before the added lines.

Fixes: 1b04dcca4fb1 ("drm/amd/display: Introduce overlay cursor mode")
Closes: 
https://lore.kernel.org/lkml/f68020a3-c413-482d-beb2-5432d98a1...@amd.com
Signed-off-by: Leo Li 
Tested-by: Mikhail Gavrilov 
---
 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 11 +++
 1 file changed, 11 insertions(+)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index a3edaf658ae00..6b5baa3e20c49 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -11427,6 +11427,17 @@ static int amdgpu_dm_atomic_check(struct drm_device 
*dev,
drm_dbg(dev, "Failed to determine cursor mode\n");
goto fail;
}
+
+   /*
+* If overlay cursor is needed, DC cannot go through the
+* native cursor update path. All enabled planes on the CRTC
+* need to be added for DC to not disable a plane by mistake
+*/
+   if (dm_new_crtc_state->cursor_mode == DM_CURSOR_OVERLAY_MODE) {
+   ret = drm_atomic_add_affected_planes(state, crtc);
+   if (ret)
+   goto fail;
+   }
}
 
/* Remove exiting planes if they are modified */
-- 
2.46.0



[PATCH 0/2] drm/amdgpu/display: Make multi-plane configurations more flexible

2024-03-15 Thread sunpeng.li
From: Leo Li 

These patches aim to make the amdgpu KMS driver play nicer with compositors
when building multi-plane scanout configurations. They do so by:

1. Making cursor behavior more sensible.
2. Allowing placement of DRM OVERLAY planes underneath the PRIMARY plane for
   'underlay' configurations (perhaps more of an RFC, see below).

Please see the commit messages for details.


For #2, the simplest way to accomplish this was to increase the value of the
immutable zpos property for the PRIMARY plane. This allowed OVERLAY planes with
a mutable zpos range of (0-254) to be positioned underneath the PRIMARY for an
underlay scanout configuration.

Technically speaking, DCN hardware does not have a concept of primary or overlay
planes - there are simply 4 general purpose hardware pipes that can be mapped in
any configuration. So the immutable zpos restriction on the PRIMARY plane is
kind of arbitrary; it can have a mutable range of (0-254) just like the
OVERLAYs. The distinction between PRIMARY and OVERLAY planes is also somewhat
arbitrary. We can interpret PRIMARY as the first plane that should be enabled on
a CRTC, but beyond that, it doesn't mean much for amdgpu.

Therefore, I'm curious about how compositor devs understand KMS planes and
their zpos properties, and how we would like to use them. It isn't clear to me
how compositors wish to interpret and use the DRM zpos property, or
differentiate between OVERLAY and PRIMARY planes, when it comes to setting up
multi-plane scanout.

Ultimately, what I'd like to answer is "What can we do on the KMS driver and DRM
plane API side, that can make building multi-plane scanout configurations easier
for compositors?" I'm hoping we can converge on something, whether that be
updating the existing documentation to better define the usage, or update the
API to provide support for something that is lacking.

Thanks,
Leo


Some links to provide context and details:
* What is underlay?: 
https://gitlab.freedesktop.org/emersion/libliftoff/-/issues/76
* Discussion on how to implement underlay on Weston: 
https://gitlab.freedesktop.org/wayland/weston/-/merge_requests/1258#note_2325164

Cc: Joshua Ashton 
Cc: Michel Dänzer 
Cc: Chao Guo 
Cc: Xaver Hugl 
Cc: Vikas Korjani 
Cc: Robert Mader 
Cc: Pekka Paalanen 
Cc: Sean Paul 
Cc: Simon Ser 
Cc: Shashank Sharma 
Cc: Harry Wentland 
Cc: Sebastian Wick 

Leo Li (2):
  drm/amd/display: Introduce overlay cursor mode
  drm/amd/display: Move PRIMARY plane zpos higher

 .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 405 --
 .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h |   7 +
 .../amd/display/amdgpu_dm/amdgpu_dm_crtc.c|   1 +
 .../amd/display/amdgpu_dm/amdgpu_dm_plane.c   |  28 +-
 4 files changed, 391 insertions(+), 50 deletions(-)

-- 
2.44.0



[PATCH 1/2] drm/amd/display: Introduce overlay cursor mode

2024-03-15 Thread sunpeng.li
From: Leo Li 

[Why]

DCN is the display hardware for amdgpu. DRM planes are backed by DCN
hardware pipes, which carry pixel data from one end (memory), to the
other (output encoder).

Each DCN pipe has the ability to blend in a cursor early on in the
pipeline. In other words, there are no dedicated cursor planes in DCN,
which makes cursor behavior somewhat unintuitive for compositors.

For example, if the cursor is in RGB format, but the top-most DRM plane
is in YUV format, DCN will not be able to blend them. Because of this,
amdgpu_dm rejects all configurations where a cursor needs to be enabled
on top of a YUV formatted plane.

From a compositor's perspective, when computing an allocation for
hardware plane offloading, this cursor-on-YUV configuration results in an
atomic test failure. Since the failure reason is not obvious at all,
compositors will likely fall back to full rendering, which is not ideal.

Instead, amdgpu_dm can try to accommodate the cursor-on-yuv
configuration by opportunistically reserving a separate DCN pipe just
for the cursor. We can refer to this as "overlay cursor mode". It is
contrasted with "native cursor mode", where the native DCN per-pipe
cursor is used.

[How]

On each CRTC, compute whether the cursor plane should be enabled in
overlay mode (currently, iff the immediate plane below has a YUV
format). If it is, mark the CRTC as requesting overlay cursor mode.

During DC validation, attempt to enable a separate DCN pipe for the
cursor if it's in overlay mode. If that fails, or if no overlay mode is
requested, then fall back to native mode.
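
A minimal sketch of the per-CRTC decision described above (the helper is
hypothetical, and the real check in amdgpu_dm's atomic check may consider
more than just the plane format):

static int pick_cursor_mode(const struct drm_plane_state *plane_below_cursor)
{
        const struct drm_framebuffer *fb = plane_below_cursor->fb;

        /* The native, in-pipe cursor cannot blend on top of a YUV surface */
        if (fb && fb->format->is_yuv)
                return DM_CURSOR_OVERLAY_MODE;

        return DM_CURSOR_NATIVE_MODE;
}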

Signed-off-by: Leo Li 
---
 .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 309 +++---
 .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h |   7 +
 .../amd/display/amdgpu_dm/amdgpu_dm_crtc.c|   1 +
 .../amd/display/amdgpu_dm/amdgpu_dm_plane.c   |  13 +-
 4 files changed, 288 insertions(+), 42 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 21a61454c878..09ab330aed17 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -8359,8 +8359,19 @@ static void amdgpu_dm_commit_planes(struct 
drm_atomic_state *state,
 * Disable the cursor first if we're disabling all the planes.
 * It'll remain on the screen after the planes are re-enabled
 * if we don't.
+*
+* If the cursor is transitioning from native to overlay mode, the
+* native cursor needs to be disabled first.
 */
-   if (acrtc_state->active_planes == 0)
+   if (acrtc_state->cursor_mode == DM_CURSOR_OVERLAY_MODE &&
+   dm_old_crtc_state->cursor_mode == DM_CURSOR_NATIVE_MODE) {
+   struct dc_cursor_position cursor_position = {0};
+   dc_stream_set_cursor_position(acrtc_state->stream,
+ &cursor_position);
+   }
+
+   if (acrtc_state->active_planes == 0 &&
+   dm_old_crtc_state->cursor_mode == DM_CURSOR_NATIVE_MODE)
amdgpu_dm_commit_cursors(state);
 
/* update planes when needed */
@@ -8374,7 +8385,8 @@ static void amdgpu_dm_commit_planes(struct 
drm_atomic_state *state,
struct dm_plane_state *dm_new_plane_state = 
to_dm_plane_state(new_plane_state);
 
/* Cursor plane is handled after stream updates */
-   if (plane->type == DRM_PLANE_TYPE_CURSOR) {
+   if (plane->type == DRM_PLANE_TYPE_CURSOR &&
+   acrtc_state->cursor_mode == DM_CURSOR_NATIVE_MODE) {
if ((fb && crtc == pcrtc) ||
(old_plane_state->fb && old_plane_state->crtc == 
pcrtc))
cursor_update = true;
@@ -8727,7 +8739,8 @@ static void amdgpu_dm_commit_planes(struct 
drm_atomic_state *state,
 * This avoids redundant programming in the case where we're going
 * to be disabling a single plane - those pipes are being disabled.
 */
-   if (acrtc_state->active_planes)
+   if (acrtc_state->active_planes &&
+   acrtc_state->cursor_mode == DM_CURSOR_NATIVE_MODE)
amdgpu_dm_commit_cursors(state);
 
 cleanup:
@@ -10039,7 +10052,8 @@ static bool should_reset_plane(struct drm_atomic_state 
*state,
 {
struct drm_plane *other;
struct drm_plane_state *old_other_state, *new_other_state;
-   struct drm_crtc_state *new_crtc_state;
+   struct drm_crtc_state *old_crtc_state, *new_crtc_state;
+   struct dm_crtc_state *old_dm_crtc_state, *new_dm_crtc_state;
struct amdgpu_device *adev = drm_to_adev(plane->dev);
int i;
 
@@ -10061,10 +10075,24 @@ static bool should_reset_plane(struct 
drm_atomic_state *state,
 
new_crtc_state =
drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
+   old_crtc_state =
+   drm_atomic_g

[PATCH 2/2] drm/amd/display: Move PRIMARY plane zpos higher

2024-03-15 Thread sunpeng.li
From: Leo Li 

[Why]

Compositors have different ways of assigning surfaces to DRM planes for
render offloading. They may decide between various strategies: overlay,
underlay, or a mix of both.

One way for compositors to implement the underlay strategy is to assign
a higher zpos to the DRM_PRIMARY plane than the DRM_OVERLAY planes,
effectively turning the DRM_OVERLAY plane into an underlay plane.

Today, amdgpu attaches an immutable zpos of 0 to the DRM_PRIMARY plane.
This, however, is an arbitrary restriction. DCN pipes are general
purpose, and can be arranged in any z-order. To support compositors
using this allocation scheme, we can set a non-zero immutable zpos for
the PRIMARY, allowing the placement of OVERLAYS (mutable zpos range
0-254) beneath the PRIMARY.

[How]

Assign a zpos equal to the number of OVERLAY planes to the PRIMARY
plane. Then, clean up any assumptions in the driver that the PRIMARY
plane has the lowest zpos.
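
Roughly, the plane-init side of this amounts to the following sketch, where
nr_overlay_planes is a placeholder for however the driver counts its
OVERLAY planes:

/* In the DM plane initialization path (sketch): */
if (plane->type == DRM_PLANE_TYPE_PRIMARY)
        /* Immutable zpos above the OVERLAY range, so OVERLAYs can sit underneath */
        drm_plane_create_zpos_immutable_property(plane, nr_overlay_planes);
else if (plane->type == DRM_PLANE_TYPE_OVERLAY)
        drm_plane_create_zpos_property(plane, 0, 0, 254);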

Signed-off-by: Leo Li 
---
 .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 96 ++-
 .../amd/display/amdgpu_dm/amdgpu_dm_plane.c   | 17 +++-
 2 files changed, 104 insertions(+), 9 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 09ab330aed17..01b00f587701 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -80,6 +80,7 @@
 #include 
 #include 
 #include 
+#include 
 
 #include 
 #include 
@@ -369,6 +370,20 @@ static inline void reverse_planes_order(struct 
dc_surface_update *array_of_surfa
swap(array_of_surface_update[i], array_of_surface_update[j]);
 }
 
+/*
+ * DC will program planes with their z-order determined by their ordering
+ * in the dc_surface_updates array. This comparator is used to sort them
+ * by descending zpos.
+ */
+static int dm_plane_layer_index_cmp(const void *a, const void *b)
+{
+   const struct dc_surface_update *sa = (struct dc_surface_update *)a;
+   const struct dc_surface_update *sb = (struct dc_surface_update *)b;
+
+   /* Sort by descending dc_plane layer_index (i.e. normalized_zpos) */
+   return sb->surface->layer_index - sa->surface->layer_index;
+}
+
 /**
  * update_planes_and_stream_adapter() - Send planes to be updated in DC
  *
@@ -393,7 +408,8 @@ static inline bool update_planes_and_stream_adapter(struct 
dc *dc,
struct dc_stream_update 
*stream_update,
struct dc_surface_update 
*array_of_surface_update)
 {
-   reverse_planes_order(array_of_surface_update, planes_count);
+   sort(array_of_surface_update, planes_count,
+sizeof(*array_of_surface_update), dm_plane_layer_index_cmp, NULL);
 
/*
 * Previous frame finished and HW is ready for optimization.
@@ -9363,6 +9379,8 @@ static void amdgpu_dm_atomic_commit_tail(struct 
drm_atomic_state *state)
for (j = 0; j < status->plane_count; j++)
dummy_updates[j].surface = status->plane_states[0];
 
+   sort(dummy_updates, status->plane_count,
+sizeof(*dummy_updates), dm_plane_layer_index_cmp, NULL);
 
mutex_lock(&dm->dc_lock);
dc_update_planes_and_stream(dm->dc,
@@ -10097,6 +10115,17 @@ static bool should_reset_plane(struct drm_atomic_state 
*state,
if (new_crtc_state->color_mgmt_changed)
return true;
 
+   /*
+* On zpos change, planes need to be reordered by removing and re-adding
+* them one by one to the dc state, in order of descending zpos.
+*
+* TODO: We can likely skip bandwidth validation if the only thing that
+* changed about the plane was its z-ordering.
+*/
+   if (new_crtc_state->zpos_changed) {
+   return true;
+   }
+
if (drm_atomic_crtc_needs_modeset(new_crtc_state))
return true;
 
@@ -10509,6 +10538,65 @@ dm_get_plane_scale(struct drm_plane_state *plane_state,
*out_plane_scale_h = plane_state->crtc_h * 1000 / plane_src_h;
 }
 
+/*
+ * The normalized_zpos value cannot be used by this iterator directly. It's 
only
+ * calculated for enabled planes, potentially causing normalized_zpos 
collisions
+ * between enabled/disabled planes in the atomic state. We need a unique value
+ * so that the iterator will not generate the same object twice, or loop
+ * indefinitely.
+ */
+static inline struct __drm_planes_state *__get_next_zpos(
+   struct drm_atomic_state *state,
+   struct __drm_planes_state *prev)
+{
+   unsigned int highest_zpos = 0, prev_zpos = 256;
+   uint32_t highest_id = 0, prev_id = UINT_MAX;
+   struct drm_plane_state *new_plane_state;
+   struct drm_plane *plane;
+   int i, highest_i = -1;
+
+   if (prev != NULL) {
+   prev_zpos = prev->new_state->zpos;
+   prev_id = prev->ptr

[PATCH] drm/amd/display: Guard ACPI calls with CONFIG_ACPI

2024-06-10 Thread sunpeng.li
From: Leo Li 

Fix the build error when CONFIG_ACPI is disabled.

Fixes: ec6f30c776ad ("drm/amd/display: Set default brightness according to 
ACPI")
Signed-off-by: Leo Li 
---
 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 6 ++
 1 file changed, 6 insertions(+)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index a2c098f1b07c..6b3634db4c15 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -4572,7 +4572,9 @@ amdgpu_dm_register_backlight_device(struct 
amdgpu_dm_connector *aconnector)
struct drm_device *drm = aconnector->base.dev;
struct amdgpu_display_manager *dm = &drm_to_adev(drm)->dm;
struct backlight_properties props = { 0 };
+#if defined(CONFIG_ACPI)
struct amdgpu_dm_backlight_caps caps = { 0 };
+#endif
char bl_name[16];
 
if (aconnector->bl_idx == -1)
@@ -4585,6 +4587,7 @@ amdgpu_dm_register_backlight_device(struct 
amdgpu_dm_connector *aconnector)
return;
}
 
+#if defined(CONFIG_ACPI)
amdgpu_acpi_get_backlight_caps(&caps);
if (caps.caps_valid) {
if (power_supply_is_system_supplied() > 0)
@@ -4593,6 +4596,9 @@ amdgpu_dm_register_backlight_device(struct 
amdgpu_dm_connector *aconnector)
props.brightness = caps.dc_level;
} else
props.brightness = AMDGPU_MAX_BL_LEVEL;
+#else
+   props.brightness = AMDGPU_MAX_BL_LEVEL;
+#endif
 
props.max_brightness = AMDGPU_MAX_BL_LEVEL;
props.type = BACKLIGHT_RAW;
-- 
2.45.1



[PATCH v2] drm/amd/display: Guard ACPI calls with CONFIG_ACPI

2024-06-10 Thread sunpeng.li
From: Leo Li 

Fix the build error seen when CONFIG_ACPI is disabled.

v2: Instead of ifdef-ing inside function, define a no-op stub for
amdgpu_acpi_get_backlight_caps when CONFIG_ACPI=n

Fixes: ec6f30c776ad ("drm/amd/display: Set default brightness according to 
ACPI")
Signed-off-by: Leo Li 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu.h | 1 +
 1 file changed, 1 insertion(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 1f71c7b98d77..083f353cff6e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -1576,6 +1576,7 @@ static inline int amdgpu_acpi_power_shift_control(struct 
amdgpu_device *adev,
  u8 dev_state, bool drv_state) 
{ return 0; }
 static inline int amdgpu_acpi_smart_shift_update(struct drm_device *dev,
 enum amdgpu_ss ss_state) { 
return 0; }
+static inline void amdgpu_acpi_get_backlight_caps(struct 
amdgpu_dm_backlight_caps *caps) { }
 #endif
 
 #if defined(CONFIG_ACPI) && defined(CONFIG_SUSPEND)
-- 
2.45.1



[PATCH] Revert "drm/amd/display: Reset freesync config before update new state"

2024-07-11 Thread sunpeng.li
From: Leo Li 

This change caused PSR SU panels to not read from their remote fb,
preventing us from entering self-refresh. It is a regression.

This reverts commit f8ebe6341a6a3745ef02648b4b5c2c89fa4a9ace.

Signed-off-by: Leo Li 
---
 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 1 -
 1 file changed, 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index d1527c2e46a1..19307d0024d3 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -10463,7 +10463,6 @@ static int dm_update_crtc_state(struct 
amdgpu_display_manager *dm,
}
 
/* Update Freesync settings. */
-   reset_freesync_config_for_crtc(dm_new_crtc_state);
get_freesync_config_for_crtc(dm_new_crtc_state,
 dm_new_conn_state);
 
-- 
2.45.2



[PATCH v2 1/2] drm/amd/display: Introduce overlay cursor mode

2024-04-29 Thread sunpeng.li
From: Leo Li 

[Why]

DCN is the display hardware for amdgpu. DRM planes are backed by DCN
hardware pipes, which carry pixel data from one end (memory), to the
other (output encoder).

Each DCN pipe has the ability to blend in a cursor early on in the
pipeline. In other words, there are no dedicated cursor planes in DCN,
which makes cursor behavior somewhat unintuitive for compositors.

For example, if the cursor is in RGB format, but the top-most DRM plane
is in YUV format, DCN will not be able to blend them. Because of this,
amdgpu_dm rejects all configurations where a cursor needs to be enabled
on top of a YUV formatted plane.

From a compositor's perspective, when computing an allocation for
hardware plane offloading, this cursor-on-yuv configuration results in an
atomic test failure. Since the failure reason is not obvious at all,
compositors will likely fall back to full rendering, which is not ideal.

Instead, amdgpu_dm can try to accommodate the cursor-on-yuv
configuration by opportunistically reserving a separate DCN pipe just
for the cursor. We can refer to this as "overlay cursor mode". It is
contrasted with "native cursor mode", where the native DCN per-pipe
cursor is used.

[How]

On each crtc, compute whether the cursor plane should be enabled in
overlay mode. If it is, mark the CRTC as requesting overlay cursor mode.

Overlay cursor should be enabled whenever there exists an underlying
plane that has a YUV format, or is scaled differently than the cursor. It
should also be enabled if there is no underlying plane, or if underlying
planes do not cover the entire CRTC.

During DC validation, attempt to enable a separate DCN pipe for the
cursor if it's in overlay mode. If that fails, or if no overlay mode is
requested, fall back to native mode.
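
As a rough sketch of the per-CRTC decision described above (illustrative
only; dm_plane_is_yuv(), dm_plane_scale_matches() and dm_plane_covers_crtc()
are assumed helper names, not the actual driver code):

static bool dm_crtc_wants_overlay_cursor(struct drm_crtc_state *crtc_state,
					 struct drm_plane_state *cursor_state)
{
	struct drm_atomic_state *state = crtc_state->state;
	struct drm_plane_state *plane_state;
	struct drm_plane *plane;
	bool found_underlying = false;
	int i;

	for_each_new_plane_in_state(state, plane, plane_state, i) {
		if (plane_state->crtc != crtc_state->crtc ||
		    plane->type == DRM_PLANE_TYPE_CURSOR)
			continue;

		found_underlying = true;

		/* DCN cannot blend the native cursor into a YUV plane. */
		if (dm_plane_is_yuv(plane_state))
			return true;

		/* The native cursor is scaled together with its pipe, so a
		 * scale mismatch also requires a dedicated cursor pipe. */
		if (!dm_plane_scale_matches(plane_state, cursor_state))
			return true;

		/* Planes that don't cover the CRTC leave the cursor over
		 * the CRTC background in places. */
		if (!dm_plane_covers_crtc(plane_state, crtc_state))
			return true;
	}

	/* No underlying plane at all: cursor sits on the CRTC background. */
	return !found_underlying;
}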

v2:
* Update commit message for when overlay cursor should be enabled
* Also consider scale and no-underlying-plane case (cursor on crtc bg)
* Consider all underlying planes when determining overlay/native, not
  just the plane immediately beneath the cursor, as it may not cover the
  entire CRTC.
* Fix typo s/decending/descending/
* Force native cursor on pre-DCN hardware

Signed-off-by: Leo Li 
Acked-by: Harry Wentland 
Acked-by: Pekka Paalanen 
---
 .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 490 +-
 .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h |   7 +
 .../amd/display/amdgpu_dm/amdgpu_dm_crtc.c|   1 +
 .../amd/display/amdgpu_dm/amdgpu_dm_plane.c   |  13 +-
 4 files changed, 386 insertions(+), 125 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 8245cc63712f..b4b5b73707c1 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -8490,8 +8490,22 @@ static void amdgpu_dm_commit_planes(struct 
drm_atomic_state *state,
 * Disable the cursor first if we're disabling all the planes.
 * It'll remain on the screen after the planes are re-enabled
 * if we don't.
+*
+* If the cursor is transitioning from native to overlay mode, the
+* native cursor needs to be disabled first.
 */
-   if (acrtc_state->active_planes == 0)
+   if (acrtc_state->cursor_mode == DM_CURSOR_OVERLAY_MODE &&
+   dm_old_crtc_state->cursor_mode == DM_CURSOR_NATIVE_MODE) {
+   struct dc_cursor_position cursor_position = {0};
+
+   dc_stream_set_cursor_position(acrtc_state->stream,
+ &cursor_position);
+   bundle->stream_update.cursor_position =
+   &acrtc_state->stream->cursor_position;
+   }
+
+   if (acrtc_state->active_planes == 0 &&
+   dm_old_crtc_state->cursor_mode == DM_CURSOR_NATIVE_MODE)
amdgpu_dm_commit_cursors(state);
 
/* update planes when needed */
@@ -8505,7 +8519,8 @@ static void amdgpu_dm_commit_planes(struct 
drm_atomic_state *state,
struct dm_plane_state *dm_new_plane_state = 
to_dm_plane_state(new_plane_state);
 
/* Cursor plane is handled after stream updates */
-   if (plane->type == DRM_PLANE_TYPE_CURSOR) {
+   if (plane->type == DRM_PLANE_TYPE_CURSOR &&
+   acrtc_state->cursor_mode == DM_CURSOR_NATIVE_MODE) {
if ((fb && crtc == pcrtc) ||
(old_plane_state->fb && old_plane_state->crtc == 
pcrtc)) {
cursor_update = true;
@@ -8863,7 +8878,8 @@ static void amdgpu_dm_commit_planes(struct 
drm_atomic_state *state,
 * to be disabling a single plane - those pipes are being disabled.
 */
if (acrtc_state->active_planes &&
-   (!updated_planes_and_streams || amdgpu_ip_version(dm->adev, 
DCE_HWIP, 0) == 0))
+   (!updated_planes_and_streams || amdgpu_ip_version(dm->adev, 
DCE_HWIP, 0) == 0) &&
+ 

[PATCH v2 2/2] drm/amd/display: Move PRIMARY plane zpos higher

2024-04-29 Thread sunpeng.li
From: Leo Li 

[Why]

Compositors have different ways of assigning surfaces to DRM planes for
render offloading. It may decide between various strategies: overlay,
underlay, or a mix of both (see here for more info:
https://gitlab.freedesktop.org/emersion/libliftoff/-/issues/76)

One way for compositors to implement the underlay strategy is to assign
a higher zpos to the DRM_PRIMARY plane than the DRM_OVERLAY planes,
effectively turning the DRM_OVERLAY plane into an underlay plane.

Today, amdgpu attaches an immutable zpos of 0 to the DRM_PRIMARY plane.
This, however, is an arbitrary restriction. DCN pipes are general
purpose, and can be arranged in any z-order. To support compositors
using this allocation scheme, we can set a non-zero immutable zpos for
the PRIMARY, allowing the placement of OVERLAYS (mutable zpos range
0-254) beneath the PRIMARY.

[How]

Assign a zpos equal to the number of OVERLAY planes to the PRIMARY plane. Then, clean
up any assumptions in the driver of PRIMARY plane having the lowest
zpos.
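
A minimal sketch of the assignment described above, using the existing DRM
zpos helpers (illustrative; using dm->dc->caps.max_slave_planes as a
stand-in for the number of exposed OVERLAY planes is an assumption):

	/* OVERLAY planes: mutable zpos in [0, 254], so they can be placed
	 * above or below the PRIMARY plane. */
	if (plane->type == DRM_PLANE_TYPE_OVERLAY)
		drm_plane_create_zpos_property(plane, 0, 0, 254);

	/* PRIMARY plane: immutable zpos equal to the number of overlays,
	 * allowing lower-zpos overlays to act as underlays. */
	if (plane->type == DRM_PLANE_TYPE_PRIMARY)
		drm_plane_create_zpos_immutable_property(plane,
					dm->dc->caps.max_slave_planes);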

Signed-off-by: Leo Li 
Reviewed-by: Harry Wentland 
Acked-by: Pekka Paalanen 

v2: Fix typo s/decending/descending/
---
 .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 34 +--
 .../amd/display/amdgpu_dm/amdgpu_dm_plane.c   | 18 +++---
 2 files changed, 44 insertions(+), 8 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index b4b5b73707c1..6782ca1137d4 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -80,6 +80,7 @@
 #include 
 #include 
 #include 
+#include 
 
 #include 
 #include 
@@ -375,6 +376,20 @@ static inline void reverse_planes_order(struct 
dc_surface_update *array_of_surfa
swap(array_of_surface_update[i], array_of_surface_update[j]);
 }
 
+/*
+ * DC will program planes with their z-order determined by their ordering
+ * in the dc_surface_updates array. This comparator is used to sort them
+ * by descending zpos.
+ */
+static int dm_plane_layer_index_cmp(const void *a, const void *b)
+{
+   const struct dc_surface_update *sa = (struct dc_surface_update *)a;
+   const struct dc_surface_update *sb = (struct dc_surface_update *)b;
+
+   /* Sort by descending dc_plane layer_index (i.e. normalized_zpos) */
+   return sb->surface->layer_index - sa->surface->layer_index;
+}
+
 /**
  * update_planes_and_stream_adapter() - Send planes to be updated in DC
  *
@@ -399,7 +414,8 @@ static inline bool update_planes_and_stream_adapter(struct 
dc *dc,
struct dc_stream_update 
*stream_update,
struct dc_surface_update 
*array_of_surface_update)
 {
-   reverse_planes_order(array_of_surface_update, planes_count);
+   sort(array_of_surface_update, planes_count,
+sizeof(*array_of_surface_update), dm_plane_layer_index_cmp, NULL);
 
/*
 * Previous frame finished and HW is ready for optimization.
@@ -9503,6 +9519,8 @@ static void amdgpu_dm_atomic_commit_tail(struct 
drm_atomic_state *state)
for (j = 0; j < status->plane_count; j++)
dummy_updates[j].surface = status->plane_states[0];
 
+   sort(dummy_updates, status->plane_count,
+sizeof(*dummy_updates), dm_plane_layer_index_cmp, NULL);
 
mutex_lock(&dm->dc_lock);
dc_update_planes_and_stream(dm->dc,
@@ -10237,6 +10255,16 @@ static bool should_reset_plane(struct drm_atomic_state 
*state,
if (new_crtc_state->color_mgmt_changed)
return true;
 
+   /*
+* On zpos change, planes need to be reordered by removing and re-adding
+* them one by one to the dc state, in order of descending zpos.
+*
+* TODO: We can likely skip bandwidth validation if the only thing that
+* changed about the plane was its z-ordering.
+*/
+   if (new_crtc_state->zpos_changed)
+   return true;
+
if (drm_atomic_crtc_needs_modeset(new_crtc_state))
return true;
 
@@ -11076,7 +11104,7 @@ static int amdgpu_dm_atomic_check(struct drm_device 
*dev,
}
 
/* Remove exiting planes if they are modified */
-   for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, 
new_plane_state, i) {
+   for_each_oldnew_plane_in_descending_zpos(state, plane, old_plane_state, 
new_plane_state) {
if (old_plane_state->fb && new_plane_state->fb &&
get_mem_type(old_plane_state->fb) !=
get_mem_type(new_plane_state->fb))
@@ -11121,7 +11149,7 @@ static int amdgpu_dm_atomic_check(struct drm_device 
*dev,
}
 
/* Add new/modified planes */
-   for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, 
new_plane_state, i) {
+   for_each_oldnew_plane_in_descending_z

[PATCH] drm: Add PSR version 4 macro

2022-01-17 Thread sunpeng.li
From: Leo Li 

eDP 1.5 specification defines PSR version 4.

It defines PSR1 and PSR2 support with selective-update (SU)
capabilities, with additional support for Y-coordinate and Early
Transport of the selective-update region.

This differs from PSR version 3 in that early transport is supported
for version 4, but not for version 3.

Signed-off-by: Leo Li 
---
 include/drm/drm_dp_helper.h | 1 +
 1 file changed, 1 insertion(+)

diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h
index 3f2715eb965f..05268c51acaa 100644
--- a/include/drm/drm_dp_helper.h
+++ b/include/drm/drm_dp_helper.h
@@ -360,6 +360,7 @@ struct drm_dp_aux;
 # define DP_PSR_IS_SUPPORTED1
 # define DP_PSR2_IS_SUPPORTED  2   /* eDP 1.4 */
 # define DP_PSR2_WITH_Y_COORD_IS_SUPPORTED  3  /* eDP 1.4a */
+# define DP_PSR2_WITH_ET_IS_SUPPORTED   4  /* eDP 1.5 (eDP 1.4b SCR) */
 
 #define DP_PSR_CAPS 0x071   /* XXX 1.2? */
 # define DP_PSR_NO_TRAIN_ON_EXIT1
-- 
2.34.1



[PATCH] drm/amd/display: Prevent OTG shutdown during PSR SU

2022-09-27 Thread sunpeng.li
From: Leo Li 

[Why]

Enabling Z10 optimizations allows DMUB to disable the OTG during PSR
link-off. This theoretically saves power by putting more of the display
hardware to sleep. However, we observe that with PSR SU, it causes
visual artifacts, higher power usage, and potential system hang.

This is partly due to an odd behavior with the VStartup interrupt used
to signal DRM vblank events. If the OTG is toggled on/off during a PSR
link on/off cycle, the vstartup interrupt fires twice in quick
succession. This generates incorrectly timed vblank events.
Additionally, it can cause cursor updates to generate visual artifacts.

Note that this is not observed with PSR1 since PSR is fully disabled
when there are vblank event requestors. Cursor updates are also
artifact-free, likely because there are no selectively-updated (SU)
frames that can generate artifacts.

[How]

A potential solution is to disable z10 idle optimizations only when fast
updates (flips & cursor updates) are committed. A mechanism to do so
would require some thoughtful design. Let's just disable idle
optimizations for PSR2 for now.

Fixes: 7cc191ee7621 ("drm/amd/display: Implement MPO PSR SU")
Signed-off-by: Leo Li 
---
 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c | 8 +++-
 1 file changed, 7 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
index c8da18e45b0e..8ca10ab3dfc1 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
@@ -170,7 +170,13 @@ bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
   &stream, 1,
   ¶ms);
 
-   power_opt |= psr_power_opt_z10_static_screen;
+   /*
+* Only enable static-screen optimizations for PSR1. For PSR SU, this
+* causes vstartup interrupt issues, used by amdgpu_dm to send vblank
+* events.
+*/
+   if (link->psr_settings.psr_version < DC_PSR_VERSION_SU_1)
+   power_opt |= psr_power_opt_z10_static_screen;
 
return dc_link_set_psr_allow_active(link, &psr_enable, false, false, 
&power_opt);
 }
-- 
2.37.3



[PATCH] drm/amdgpu: Fix mc_umc_status used uninitialized warning

2022-09-28 Thread sunpeng.li
From: Leo Li 

On ChromeOS clang build, the following warning is seen:

/mnt/host/source/src/third_party/kernel/v5.15/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c:463:6:
 error: variable 'mc_umc_status' is used uninitialized whenever 'if' condition 
is false [-Werror,-Wsometimes-uninitialized]
if (mca_addr == UMC_INVALID_ADDR) {
^~~~
/mnt/host/source/src/third_party/kernel/v5.15/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c:485:21:
 note: uninitialized use occurs here
if ((REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 
1 &&
   ^
/mnt/host/source/src/third_party/kernel/v5.15/drivers/gpu/drm/amd/amdgpu/../amdgpu/amdgpu.h:1208:5:
 note: expanded from macro 'REG_GET_FIELD'
(((value) & REG_FIELD_MASK(reg, field)) >> REG_FIELD_SHIFT(reg, field))
   ^
/mnt/host/source/src/third_party/kernel/v5.15/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c:463:2:
 note: remove the 'if' if its condition is always true
if (mca_addr == UMC_INVALID_ADDR) {
^~
/mnt/host/source/src/third_party/kernel/v5.15/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c:460:24:
 note: initialize the variable 'mc_umc_status' to silence this warning
uint64_t mc_umc_status, mc_umc_addrt0;
  ^
   = 0
1 error generated.
make[5]: *** 
[/mnt/host/source/src/third_party/kernel/v5.15/scripts/Makefile.build:289: 
drivers/gpu/drm/amd/amdgpu/umc_v6_7.o] Error 1

Fix by initializing mc_umc_status = 0.

Fixes: d8e19e32945e ("drm/amdgpu: support to convert dedicated umc mca address")
Signed-off-by: Leo Li 
---
 drivers/gpu/drm/amd/amdgpu/umc_v6_7.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c 
b/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c
index 2cc961534542..a0d19b768346 100644
--- a/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c
@@ -457,7 +457,7 @@ static void umc_v6_7_query_error_address(struct 
amdgpu_device *adev,
 {
uint32_t mc_umc_status_addr;
uint32_t channel_index;
-   uint64_t mc_umc_status, mc_umc_addrt0;
+   uint64_t mc_umc_status = 0, mc_umc_addrt0;
uint64_t err_addr, soc_pa, retired_page, column;
 
if (mca_addr == UMC_INVALID_ADDR) {
-- 
2.37.3



[PATCH v2] drm/amd/display: Prevent OTG shutdown during PSR SU

2022-09-28 Thread sunpeng.li
From: Leo Li 

[Why]

Enabling Z10 optimizations allows DMUB to disable the OTG during PSR
link-off. This theoretically saves power by putting more of the display
hardware to sleep. However, we observe that with PSR SU, it causes
visual artifacts, higher power usage, and potential system hang.

This is partly due to an odd behavior with the VStartup interrupt used
to signal DRM vblank events. If the OTG is toggled on/off during a PSR
link on/off cycle, the vstartup interrupt fires twice in quick
succession. This generates incorrectly timed vblank events.
Additionally, it can cause cursor updates to generate visual artifacts.

Note that this is not observed with PSR1 since PSR is fully disabled
when there are vblank event requestors. Cursor updates are also
artifact-free, likely because there are no selectively-updated (SU)
frames that can generate artifacts.

[How]

A potential solution is to disable z10 idle optimizations only when fast
updates (flips & cursor updates) are committed. A mechanism to do so
would require some thoughtful design. Let's just disable idle
optimizations for PSR2 for now.

Fixes: 7cc191ee7621 ("drm/amd/display: Implement MPO PSR SU")
Reported-by: August Wikerfors 
Link: 
https://lore.kernel.org/r/c1f8886a-5624-8f49-31b1-e42b6d20d...@augustwikerfors.se/
Tested-by: August Wikerfors 
Reviewed-by: Harry Wentland 
Signed-off-by: Leo Li 
---
 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c | 8 +++-
 1 file changed, 7 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
index c8da18e45b0e..8ca10ab3dfc1 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
@@ -170,7 +170,13 @@ bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
   &stream, 1,
   ¶ms);
 
-   power_opt |= psr_power_opt_z10_static_screen;
+   /*
+* Only enable static-screen optimizations for PSR1. For PSR SU, this
+* causes vstartup interrupt issues, used by amdgpu_dm to send vblank
+* events.
+*/
+   if (link->psr_settings.psr_version < DC_PSR_VERSION_SU_1)
+   power_opt |= psr_power_opt_z10_static_screen;
 
return dc_link_set_psr_allow_active(link, &psr_enable, false, false, 
&power_opt);
 }
-- 
2.37.3



[PATCH] drm/amd/display: Fix DC_FP_START/END use on non-DCN builds

2022-02-17 Thread sunpeng.li
From: Leo Li 

DC_FP_START/END is only defined for CONFIG_DRM_AMD_DC_DCN enabled
builds, and update_bw_bounding_box() is only valid for DCN ASICs.

Therefore, wrap the entire thing in the ifdef guard.

Acked-by: Alex Deucher 
Signed-off-by: Leo Li 
---
 drivers/gpu/drm/amd/display/dc/core/dc.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c 
b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 4ea114725d61..b1ce3c0cf477 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -987,13 +987,13 @@ static bool dc_construct(struct dc *dc,
goto fail;
 #ifdef CONFIG_DRM_AMD_DC_DCN
dc->clk_mgr->force_smu_not_present = init_params->force_smu_not_present;
-#endif
 
if (dc->res_pool->funcs->update_bw_bounding_box) {
DC_FP_START();
dc->res_pool->funcs->update_bw_bounding_box(dc, 
dc->clk_mgr->bw_params);
DC_FP_END();
}
+#endif
 
/* Creation of current_state must occur after dc->dml
 * is initialized in dc_create_resource_pool because
-- 
2.34.1



[PATCH] drm/amd/display: Fix DC definition of PMFW Pstate table for DCN316

2022-02-24 Thread sunpeng.li
From: Leo Li 

[Why]

During DC init, we read power management tables from PMFW. This info is
exchanged in the form of a binary blob inside gpu memory. In order to
parse the binary blob, the correct struct needs to be used.

[How]

Fix dcn316's definition of the DfPstateTable_t struct to align with PMFW.
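
As a build-time sanity sketch (not part of this patch, and assuming the
PMFW table entry is indeed 16 bytes), the post-patch layout below could be
checked with:

	BUILD_BUG_ON(sizeof(DfPstateTable_t) != 16);	/* 3x u32 + u8 + u8[3] */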

Signed-off-by: Leo Li 
---
 .../amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c| 11 ++-
 .../drm/amd/display/dc/clk_mgr/dcn316/dcn316_smu.h|  9 +
 2 files changed, 19 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c 
b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
index 02a59adff90d..c940635b7a74 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
@@ -530,7 +530,16 @@ static void dcn316_clk_mgr_helper_populate_bw_params(
bw_params->clk_table.entries[i].fclk_mhz = 
clock_table->DfPstateTable[j].FClk;
bw_params->clk_table.entries[i].memclk_mhz = 
clock_table->DfPstateTable[j].MemClk;
bw_params->clk_table.entries[i].voltage = 
clock_table->DfPstateTable[j].Voltage;
-   bw_params->clk_table.entries[i].wck_ratio = 1;
+   switch (clock_table->DfPstateTable[j].WckRatio) {
+   case WCK_RATIO_1_2:
+   bw_params->clk_table.entries[i].wck_ratio = 2;
+   break;
+   case WCK_RATIO_1_4:
+   bw_params->clk_table.entries[i].wck_ratio = 4;
+   break;
+   default:
+   bw_params->clk_table.entries[i].wck_ratio = 1;
+   }
temp = find_clk_for_voltage(clock_table, 
clock_table->DcfClocks, clock_table->DfPstateTable[j].Voltage);
if (temp)
bw_params->clk_table.entries[i].dcfclk_mhz = temp;
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_smu.h 
b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_smu.h
index 4c6b202fe622..658b36d0e107 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_smu.h
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_smu.h
@@ -57,10 +57,19 @@ typedef enum {
   WM_COUNT,
 } WM_CLOCK_e;
 
+typedef enum{
+  WCK_RATIO_1_1 = 0,  // DDR5, Wck:ck is always 1:1;
+  WCK_RATIO_1_2,
+  WCK_RATIO_1_4,
+  WCK_RATIO_MAX
+} WCK_RATIO_e;
+
 typedef struct {
   uint32_t FClk;
   uint32_t MemClk;
   uint32_t Voltage;
+  uint8_t  WckRatio;
+  uint8_t  Spare[3];
 } DfPstateTable_t;
 
 //Freq in MHz
-- 
2.35.1



[PATCH 1/2] drm/amd/display: Add visualconfirm module parameter

2022-07-07 Thread sunpeng.li
From: Leo Li 

[Why]

Being able to configure visual confirm at boot or in cmdline is helpful
when debugging.

[How]

Add a module parameter to configure DC visual confirm, which works the
same way as the equivalent debugfs entry.
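
For example, with the parameter named visualconfirm as in the diff below,
it can be set on the kernel command line or at module load (values follow
the MODULE_PARM_DESC below, e.g. 1 = MPO):

	amdgpu.visualconfirm=1		(kernel command line)
	modprobe amdgpu visualconfirm=1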

Signed-off-by: Leo Li 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu.h   | 1 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c   | 4 
 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 2 ++
 3 files changed, 7 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 845d6054992a..4629bef6c44e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -197,6 +197,7 @@ extern uint amdgpu_smu_memory_pool_size;
 extern int amdgpu_smu_pptable_id;
 extern uint amdgpu_dc_feature_mask;
 extern uint amdgpu_dc_debug_mask;
+extern uint amdgpu_dc_visual_confirm;
 extern uint amdgpu_dm_abm_level;
 extern int amdgpu_backlight;
 extern struct amdgpu_mgpu_info mgpu_info;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 1cc9260e75de..e1e8cf70e719 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -167,6 +167,7 @@ int amdgpu_smu_pptable_id = -1;
  */
 uint amdgpu_dc_feature_mask = 2;
 uint amdgpu_dc_debug_mask;
+uint amdgpu_dc_visual_confirm;
 int amdgpu_async_gfx_ring = 1;
 int amdgpu_mcbp;
 int amdgpu_discovery = -1;
@@ -827,6 +828,9 @@ module_param_named(dcfeaturemask, amdgpu_dc_feature_mask, 
uint, 0444);
 MODULE_PARM_DESC(dcdebugmask, "all debug options disabled (default))");
 module_param_named(dcdebugmask, amdgpu_dc_debug_mask, uint, 0444);
 
+MODULE_PARM_DESC(visualconfirm, "Visual confirm (0 = off (default), 1 = MPO, 5 
= PSR)");
+module_param_named(visualconfirm, amdgpu_dc_visual_confirm, uint, 0444);
+
 /**
  * DOC: abmlevel (uint)
  * Override the default ABM (Adaptive Backlight Management) level used for DC
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index eb5efb4aa2ba..d7208c0b76b1 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -1562,6 +1562,8 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
if (amdgpu_dc_debug_mask & DC_FORCE_SUBVP_MCLK_SWITCH)
adev->dm.dc->debug.force_subvp_mclk_switch = true;
 
+   adev->dm.dc->debug.visual_confirm = amdgpu_dc_visual_confirm;
+
r = dm_dmub_hw_init(adev);
if (r) {
DRM_ERROR("DMUB interface failed to initialize: status=%d\n", 
r);
-- 
2.37.0



[PATCH 2/2] drm/amd/display: Add dcdebugmask option for disabling MPO

2022-07-07 Thread sunpeng.li
From: Leo Li 

[Why & How]

It's useful to disable MPO when debugging or testing. Therefore, add a
dcdebugmask option to disable MPO.
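
For example, since the new bit is 0x40 (see the amd_shared.h hunk below),
MPO can be disabled at boot with:

	amdgpu.dcdebugmask=0x40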

Signed-off-by: Leo Li 
---
 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 4 
 drivers/gpu/drm/amd/include/amd_shared.h  | 1 +
 2 files changed, 5 insertions(+)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index d7208c0b76b1..4cf11bcc01a4 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -4194,6 +4194,10 @@ static int amdgpu_dm_initialize_drm_device(struct 
amdgpu_device *adev)
for (i = 0; i < dm->dc->caps.max_planes; ++i) {
struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
 
+   /* Do not create overlay if MPO disabled */
+   if (amdgpu_dc_debug_mask & DC_DISABLE_MPO)
+   break;
+
if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
continue;
 
diff --git a/drivers/gpu/drm/amd/include/amd_shared.h 
b/drivers/gpu/drm/amd/include/amd_shared.h
index 1db21d13726d..f175e65b853a 100644
--- a/drivers/gpu/drm/amd/include/amd_shared.h
+++ b/drivers/gpu/drm/amd/include/amd_shared.h
@@ -249,6 +249,7 @@ enum DC_DEBUG_MASK {
DC_DISABLE_CLOCK_GATING = 0x8,
DC_DISABLE_PSR = 0x10,
DC_FORCE_SUBVP_MCLK_SWITCH = 0x20,
+   DC_DISABLE_MPO = 0x40,
 };
 
 enum amd_dpm_forced_level;
-- 
2.37.0



[PATCH] drm/amdgpu: Check BO's requested pinning domains against its preferred_domains

2022-07-11 Thread sunpeng.li
From: Leo Li 

When pinning a buffer, we should check to see if there are any
additional restrictions imposed by bo->preferred_domains. This will
prevent the BO from being moved to an invalid domain when pinning.

For example, this can happen if the user requests to create a BO in GTT
domain for display scanout. amdgpu_dm will allow pinning to either VRAM
or GTT domains, since DCN can scan out from either one. However, in
amdgpu_bo_pin_restricted(), pinning to VRAM is preferred if there is
adequate carveout. This can lead to pinning to VRAM despite the user
requesting GTT placement for the BO.
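
A small sketch of the intended semantics (illustrative values, not driver
code):

	/* User created the BO with GTT as its only preferred domain. */
	u32 preferred = AMDGPU_GEM_DOMAIN_GTT;

	/* Display code then asks to pin to either VRAM or GTT. */
	u32 requested = AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT;

	/* With this patch, pinning uses the intersection, so the BO stays
	 * in GTT instead of being migrated to VRAM. */
	u32 pin_domain = preferred & requested;	/* == AMDGPU_GEM_DOMAIN_GTT */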

Signed-off-by: Leo Li 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 8a7b0f6162da..50400d70ef12 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -883,6 +883,9 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 
domain,
if (WARN_ON_ONCE(min_offset > max_offset))
return -EINVAL;
 
+   /* Check domain to be pinned to against preferred domains */
+   domain = bo->preferred_domains & domain;
+
/* A shared bo cannot be migrated to VRAM */
if (bo->tbo.base.import_attach) {
if (domain & AMDGPU_GEM_DOMAIN_GTT)
-- 
2.37.0



[PATCH v2] drm/amdgpu: Check BO's requested pinning domains against its preferred_domains

2022-07-12 Thread sunpeng.li
From: Leo Li 

When pinning a buffer, we should check to see if there are any
additional restrictions imposed by bo->preferred_domains. This will
prevent the BO from being moved to an invalid domain when pinning.

For example, this can happen if the user requests to create a BO in GTT
domain for display scanout. amdgpu_dm will allow pinning to either VRAM
or GTT domains, since DCN can scan out from either one. However, in
amdgpu_bo_pin_restricted(), pinning to VRAM is preferred if there is
adequate carveout. This can lead to pinning to VRAM despite the user
requesting GTT placement for the BO.

v2: Allow the kernel to override the domain, which can happen when
exporting a BO to a V4L camera (for example).

Signed-off-by: Leo Li 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 4 
 1 file changed, 4 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 8a7b0f6162da..bbd3b8b14cfb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -883,6 +883,10 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 
domain,
if (WARN_ON_ONCE(min_offset > max_offset))
return -EINVAL;
 
+   /* Check domain to be pinned to against preferred domains */
+   if (bo->preferred_domains & domain)
+   domain = bo->preferred_domains & domain;
+
/* A shared bo cannot be migrated to VRAM */
if (bo->tbo.base.import_attach) {
if (domain & AMDGPU_GEM_DOMAIN_GTT)
-- 
2.37.0



[PATCH] drm/amd/display: Fail atomic_check early on normalize_zpos error

2023-02-13 Thread sunpeng.li
From: Leo Li 

[Why]

drm_atomic_normalize_zpos() can return an error code when there's
modeset lock contention. This was being ignored.

[How]

Bail out of atomic check if normalize_zpos() returns an error.

Fixes: b261509952bc ("drm/amd/display: Fix double cursor on non-video RGB MPO")
Signed-off-by: Leo Li 
Tested-by: Mikhail Gavrilov 
---
 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 6 +-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index c10982f841f98..cb2a57503000d 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -9889,7 +9889,11 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
 * `dcn10_can_pipe_disable_cursor`). By now, all modified planes are in
 * atomic state, so call drm helper to normalize zpos.
 */
-   drm_atomic_normalize_zpos(dev, state);
+   ret = drm_atomic_normalize_zpos(dev, state);
+   if (ret) {
+   drm_dbg(dev, "drm_atomic_normalize_zpos() failed\n");
+   goto fail;
+   }
 
/* Remove exiting planes if they are modified */
for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, 
new_plane_state, i) {
-- 
2.39.1



[PATCH 0/4] PSR-SU-RC DC support and some PSR-SU fixes

2022-06-02 Thread sunpeng.li
From: Leo Li 

The first two patches here add PSR SU Rate Control support to DC. Support in
amdgpu_dm is still pending to enable this fully.

The last two patches are some fixes for PSR SU.

David Zhang (3):
  drm/amd/display: expose AMD specific DPCD for PSR-SU-RC support
  drm/amd/display: Add PSR-SU-RC support in DC
  drm/amd/display: pass panel instance in dirty rect message

Robin Chen (1):
  drm/amd/display: refactor dirty rect dmub command decision

 drivers/gpu/drm/amd/display/dc/core/dc.c  | 19 ++-
 drivers/gpu/drm/amd/display/dc/core/dc_link.c | 22 +
 drivers/gpu/drm/amd/display/dc/dc_link.h  |  3 +++
 drivers/gpu/drm/amd/display/dc/dc_types.h |  2 ++
 drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c | 23 ++
 drivers/gpu/drm/amd/display/dc/dce/dmub_psr.h |  2 ++
 .../amd/display/dc/dcn10/dcn10_hw_sequencer.c | 24 ++-
 .../drm/amd/display/dc/inc/hw/link_encoder.h  |  8 +++
 .../amd/display/include/ddc_service_types.h   |  4 
 9 files changed, 100 insertions(+), 7 deletions(-)

-- 
2.36.1



[PATCH 1/4] drm/amd/display: expose AMD specific DPCD for PSR-SU-RC support

2022-06-02 Thread sunpeng.li
From: David Zhang 

[why & how]

Expose vendor specific DPCD registers for rate controlling the eDP sink
TCON's refresh rate during PSR active. When used in combination with
PSR-SU and Freesync, it is called PSR-SU Rate Control, or PSR-SU-RC for
short.

v2: Add all DPCD registers required

Signed-off-by: David Zhang 
Acked-by: Leo Li 
---
 drivers/gpu/drm/amd/display/include/ddc_service_types.h | 4 
 1 file changed, 4 insertions(+)

diff --git a/drivers/gpu/drm/amd/display/include/ddc_service_types.h 
b/drivers/gpu/drm/amd/display/include/ddc_service_types.h
index 20a3d4e23f66..05096c644a60 100644
--- a/drivers/gpu/drm/amd/display/include/ddc_service_types.h
+++ b/drivers/gpu/drm/amd/display/include/ddc_service_types.h
@@ -41,6 +41,10 @@
 #define DP_DEVICE_ID_38EC11 0x38EC11
 #define DP_FORCE_PSRSU_CAPABILITY 0x40F
 
+#define DP_SINK_PSR_ACTIVE_VTOTAL  0x373
+#define DP_SINK_PSR_ACTIVE_VTOTAL_CONTROL_MODE 0x375
+#define DP_SOURCE_PSR_ACTIVE_VTOTAL0x376
+
 enum ddc_result {
DDC_RESULT_UNKNOWN = 0,
DDC_RESULT_SUCESSFULL,
-- 
2.36.1



[PATCH 2/4] drm/amd/display: Add PSR-SU-RC support in DC

2022-06-02 Thread sunpeng.li
From: David Zhang 

[Why]

PSR-SU Rate Control - or PSR-SU-RC - enables PSR-SU panels to work with
variable refresh rate to allow for more power savings. Lowering the
refresh rate can increase PSR residency by expanding the eDP main link
shut down duration. It can also lower panel power consumption.

There is a complication with PSR, since the eDP main link can be
shut down. Therefore, the timing controller (TCON) on the eDP sink needs
to be able to scan out its remote buffer independent of the main link.
To allow the eDP source to specify the sink's refresh rate while the
link is off, vendor-specific DPCD registers are used. This allows the
eDP source to then "Rate Control" the panel during PSR active.

[How]

Add DC support to communicate with PSR-SU-RC supported eDP sinks. The
sink will need to know the desired VTotal during PSR active.

This change only adds support to DC, support in amdgpu_dm is still pending to
enable this fully.
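
A rough caller-side sketch of the new DC entry point added below (the call
site and VTotal values are hypothetical, since amdgpu_dm support is still
pending):

	if (link->psr_settings.psr_vtotal_control_support) {
		uint16_t vtotal_idle = 2250;	/* example: slower refresh while idle */
		uint16_t vtotal_su = 1125;	/* example: nominal rate for SU updates */

		dc_link_set_sink_vtotal_in_psr_active(link, vtotal_idle,
						      vtotal_su);
	}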

Signed-off-by: David Zhang 
Signed-off-by: Leo Li 
---
 drivers/gpu/drm/amd/display/dc/core/dc_link.c | 22 ++
 drivers/gpu/drm/amd/display/dc/dc_link.h  |  3 +++
 drivers/gpu/drm/amd/display/dc/dc_types.h |  2 ++
 drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c | 23 +++
 drivers/gpu/drm/amd/display/dc/dce/dmub_psr.h |  2 ++
 .../drm/amd/display/dc/inc/hw/link_encoder.h  |  8 +++
 6 files changed, 60 insertions(+)

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c 
b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index 31ffb961e18b..3d6dcaa6a483 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -1795,6 +1795,7 @@ static bool dc_link_construct_legacy(struct dc_link *link,
 */
program_hpd_filter(link);
 
+   link->psr_settings.psr_vtotal_control_support = false;
link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
 
DC_LOG_DC("BIOS object table - %s finished successfully.\n", __func__);
@@ -3207,6 +3208,7 @@ bool dc_link_setup_psr(struct dc_link *link,
/* updateSinkPsrDpcdConfig*/
union dpcd_psr_configuration psr_configuration;
union dpcd_alpm_configuration alpm_configuration;
+   union dpcd_sink_active_vtotal_control_mode vtotal_control = {0};
 
psr_context->controllerId = CONTROLLER_ID_UNDEFINED;
 
@@ -3276,6 +3278,13 @@ bool dc_link_setup_psr(struct dc_link *link,
psr_config->su_y_granularity;
psr_context->line_time_in_us =
psr_config->line_time_in_us;
+
+   if (link->psr_settings.psr_vtotal_control_support) {
+   psr_context->rate_control_caps = 
psr_config->rate_control_caps;
+   vtotal_control.bits.ENABLE = true;
+   core_link_write_dpcd(link, 
DP_SINK_PSR_ACTIVE_VTOTAL_CONTROL_MODE,
+   &vtotal_control.raw, 
sizeof(vtotal_control.raw));
+   }
}
 
psr_context->channel = link->ddc->ddc_pin->hw_info.ddc_channel;
@@ -3408,6 +3417,19 @@ void dc_link_get_psr_residency(const struct dc_link 
*link, uint32_t *residency)
*residency = 0;
 }
 
+bool dc_link_set_sink_vtotal_in_psr_active(const struct dc_link *link, 
uint16_t psr_vtotal_idle, uint16_t psr_vtotal_su)
+{
+   struct dc *dc = link->ctx->dc;
+   struct dmub_psr *psr = dc->res_pool->psr;
+
+   if (psr == NULL || !link->psr_settings.psr_feature_enabled || 
!link->psr_settings.psr_vtotal_control_support)
+   return false;
+
+   psr->funcs->psr_set_sink_vtotal_in_psr_active(psr, psr_vtotal_idle, 
psr_vtotal_su);
+
+   return true;
+}
+
 const struct dc_link_status *dc_link_get_status(const struct dc_link *link)
 {
return &link->link_status;
diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h 
b/drivers/gpu/drm/amd/display/dc/dc_link.h
index 0bec986a6de8..3ec189dd73da 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_link.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_link.h
@@ -100,6 +100,7 @@ struct psr_settings {
bool psr_feature_enabled;   // PSR is supported by sink
bool psr_allow_active;  // PSR is currently active
enum dc_psr_version psr_version;// Internal PSR 
version, determined based on DPCD
+   bool psr_vtotal_control_support;// Vtotal control is supported 
by sink
 
/* These parameters are calculated in Driver,
 * based on display timing and Sink capabilities.
@@ -324,6 +325,8 @@ void dc_link_get_psr_residency(const struct dc_link *link, 
uint32_t *residency);
 void dc_link_blank_all_dp_displays(struct dc *dc);
 
 void dc_link_blank_dp_stream(struct dc_link *link, bool hw_init);
+bool dc_link_set_sink_vtotal_in_psr_active(const struct dc_link *link,
+   uint16_t psr_vtotal_idle, uint16_t psr_vtotal_su);
 
 /* Request DC to detect if there is a Panel con

[PATCH 3/4] drm/amd/display: pass panel instance in dirty rect message

2022-06-02 Thread sunpeng.li
From: David Zhang 

[why]
DMUB FW uses the OTG instance to get the eDP panel instance. But in case
of MPO, multiple pipe indexes are passed to update the same panel.
The other OTG instance passed would be ignored, causing DMUB to not
acknowledge the messages.

[how]
Add panel instance to dirty rectangle data and cursor update data
structures and pass to DMUB.

Signed-off-by: Mikita Lipski 
Acked-by: Leo Li 
---
 drivers/gpu/drm/amd/display/dc/core/dc.c | 5 +
 1 file changed, 5 insertions(+)

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c 
b/drivers/gpu/drm/amd/display/dc/core/dc.c
index d4173be11903..31d83297bcb5 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -2837,10 +2837,14 @@ void dc_dmub_update_dirty_rect(struct dc *dc,
struct dc_context *dc_ctx = dc->ctx;
struct dmub_cmd_update_dirty_rect_data *update_dirty_rect;
unsigned int i, j;
+   unsigned int panel_inst = 0;
 
if (stream->link->psr_settings.psr_version != DC_PSR_VERSION_SU_1)
return;
 
+   if (!dc_get_edp_link_panel_inst(dc, stream->link, &panel_inst))
+   return;
+
memset(&cmd, 0x0, sizeof(cmd));
cmd.update_dirty_rect.header.type = DMUB_CMD__UPDATE_DIRTY_RECT;
cmd.update_dirty_rect.header.sub_type = 0;
@@ -2869,6 +2873,7 @@ void dc_dmub_update_dirty_rect(struct dc *dc,
if (pipe_ctx->plane_state != plane_state)
continue;
 
+   update_dirty_rect->panel_inst = panel_inst;
update_dirty_rect->pipe_idx = j;
dc_dmub_srv_cmd_queue(dc_ctx->dmub_srv, &cmd);
dc_dmub_srv_cmd_execute(dc_ctx->dmub_srv);
-- 
2.36.1



[PATCH 4/4] drm/amd/display: refactor dirty rect dmub command decision

2022-06-02 Thread sunpeng.li
From: Robin Chen 

[Why]
To wrap the decision logic of sending dirty rect dmub command
for both frame update and cursor update path.

Signed-off-by: Robin Chen 
Acked-by: Leo Li 
---
 drivers/gpu/drm/amd/display/dc/core/dc.c  | 14 ++-
 .../amd/display/dc/dcn10/dcn10_hw_sequencer.c | 24 ++-
 2 files changed, 31 insertions(+), 7 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c 
b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 31d83297bcb5..645ec5bc3a7d 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -2827,6 +2827,18 @@ static void commit_planes_do_stream_update(struct dc *dc,
}
 }
 
+static bool dc_dmub_should_send_dirty_rect_cmd(struct dc *dc, struct 
dc_stream_state *stream)
+{
+   if (stream->link->psr_settings.psr_version == DC_PSR_VERSION_SU_1)
+   return true;
+
+   if (stream->link->psr_settings.psr_version == DC_PSR_VERSION_1 &&
+   dc->debug.enable_sw_cntl_psr)
+   return true;
+
+   return false;
+}
+
 void dc_dmub_update_dirty_rect(struct dc *dc,
   int surface_count,
   struct dc_stream_state *stream,
@@ -2839,7 +2851,7 @@ void dc_dmub_update_dirty_rect(struct dc *dc,
unsigned int i, j;
unsigned int panel_inst = 0;
 
-   if (stream->link->psr_settings.psr_version != DC_PSR_VERSION_SU_1)
+   if (!dc_dmub_should_send_dirty_rect_cmd(dc, stream))
return;
 
if (!dc_get_edp_link_panel_inst(dc, stream->link, &panel_inst))
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c 
b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index 7fe06a2c0c04..5b5e0dd13fd0 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -3325,6 +3325,23 @@ static bool dcn10_can_pipe_disable_cursor(struct 
pipe_ctx *pipe_ctx)
return false;
 }
 
+static bool dcn10_dmub_should_update_cursor_data(
+   struct pipe_ctx *pipe_ctx,
+   struct dc_debug_options *debug)
+{
+   if (pipe_ctx->plane_state->address.type == 
PLN_ADDR_TYPE_VIDEO_PROGRESSIVE)
+   return false;
+
+   if (pipe_ctx->stream->link->psr_settings.psr_version == 
DC_PSR_VERSION_SU_1)
+   return true;
+
+   if (pipe_ctx->stream->link->psr_settings.psr_version == 
DC_PSR_VERSION_1 &&
+   debug->enable_sw_cntl_psr)
+   return true;
+
+   return false;
+}
+
 static void dcn10_dmub_update_cursor_data(
struct pipe_ctx *pipe_ctx,
struct hubp *hubp,
@@ -3346,13 +3363,8 @@ static void dcn10_dmub_update_cursor_data(
 
struct dc_debug_options *debug = &hubp->ctx->dc->debug;
 
-   if (!debug->enable_sw_cntl_psr && 
pipe_ctx->stream->link->psr_settings.psr_version != DC_PSR_VERSION_SU_1)
+   if (!dcn10_dmub_should_update_cursor_data(pipe_ctx, debug))
return;
-
-   if (pipe_ctx->stream->link->psr_settings.psr_version == 
DC_PSR_VERSION_UNSUPPORTED ||
-   pipe_ctx->plane_state->address.type == 
PLN_ADDR_TYPE_VIDEO_PROGRESSIVE)
-   return;
-
/**
 * if cur_pos == NULL means the caller is from cursor_set_attribute
 * then driver use previous cursor position data
-- 
2.36.1



[PATCH v2 3/4] drm/amd/display: pass panel instance in DMUB dirty rect command

2022-06-03 Thread sunpeng.li
From: David Zhang 

[Why]

In cases where there are multiple eDP instances, DMUB needs to know
which instance the command is for. Today, the field for specifying the
panel_inst exists in both dmub_cmd_update_dirty_rect_data and
dmub_cmd_update_cursor_info_data.

For cursor updates, we already specify the panel_inst, but that's not
the case for dirty_rect updates. Today, a value of '0' is used (due
to initial memsetting of the cmd struct to 0)

[how]

In dc_dmub_update_dirty_rect(), Call dc_get_edp_link_panel_inst() to get
the panel_inst, and fill it in the DMUB cmd struct.

v2: Update commit message for clarity.

Signed-off-by: Mikita Lipski 
Signed-off-by: David Zhang 
Acked-by: Leo Li 
---
 drivers/gpu/drm/amd/display/dc/core/dc.c | 5 +
 1 file changed, 5 insertions(+)

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c 
b/drivers/gpu/drm/amd/display/dc/core/dc.c
index d4173be11903..31d83297bcb5 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -2837,10 +2837,14 @@ void dc_dmub_update_dirty_rect(struct dc *dc,
struct dc_context *dc_ctx = dc->ctx;
struct dmub_cmd_update_dirty_rect_data *update_dirty_rect;
unsigned int i, j;
+   unsigned int panel_inst = 0;
 
if (stream->link->psr_settings.psr_version != DC_PSR_VERSION_SU_1)
return;
 
+   if (!dc_get_edp_link_panel_inst(dc, stream->link, &panel_inst))
+   return;
+
memset(&cmd, 0x0, sizeof(cmd));
cmd.update_dirty_rect.header.type = DMUB_CMD__UPDATE_DIRTY_RECT;
cmd.update_dirty_rect.header.sub_type = 0;
@@ -2869,6 +2873,7 @@ void dc_dmub_update_dirty_rect(struct dc *dc,
if (pipe_ctx->plane_state != plane_state)
continue;
 
+   update_dirty_rect->panel_inst = panel_inst;
update_dirty_rect->pipe_idx = j;
dc_dmub_srv_cmd_queue(dc_ctx->dmub_srv, &cmd);
dc_dmub_srv_cmd_execute(dc_ctx->dmub_srv);
-- 
2.36.1



[PATCH] drm/amd/display: Use pre-allocated temp struct for bounding box update

2022-06-08 Thread sunpeng.li
From: Leo Li 

[Why]

There is a theoretical problem with the prior patches that reduced the
stack size of the *update_bw_bounding_box() functions.

Modifying the soc.clock_limits[n] struct directly can cause
unintended behavior as the for loop attempts to swap rows in
clock_limits[n]. A temporary struct is still required to make sure we
stay functionally equivalent.

[How]

Add a temporary clock_limits table to the SOC struct, and use it when
swapping rows.
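
The pattern being applied, in sketch form (names abbreviated; the real
tables and loops live in the dcn*_fpu.c files below):

	/* 1) Start from a scratch copy so reads during the loop still see
	 *    the original, unmodified rows. */
	memcpy(&soc._clock_tmp, &soc.clock_limits, sizeof(soc.clock_limits));

	/* 2) Build the reordered table in _clock_tmp, reading only from the
	 *    untouched soc.clock_limits[closest_clk_lvl] rows. */
	for (i = 0; i < clk_table->num_entries; i++)
		soc._clock_tmp[i] = soc.clock_limits[closest_clk_lvl];

	/* 3) Publish the result in one shot. */
	memcpy(&soc.clock_limits, &soc._clock_tmp, sizeof(soc.clock_limits));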

Signed-off-by: Leo Li 
---
 .../drm/amd/display/dc/dml/dcn20/dcn20_fpu.c  | 33 +-
 .../amd/display/dc/dml/dcn301/dcn301_fpu.c| 36 ++-
 .../drm/amd/display/dc/dml/dcn31/dcn31_fpu.c  | 64 +++
 .../amd/display/dc/dml/display_mode_structs.h |  5 ++
 4 files changed, 82 insertions(+), 56 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c 
b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
index c2fec0d85da4..e247b2270b1d 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
@@ -2015,9 +2015,8 @@ void dcn21_update_bw_bounding_box(struct dc *dc, struct 
clk_bw_params *bw_params
 
ASSERT(clk_table->num_entries);
/* Copy dcn2_1_soc.clock_limits to clock_limits to avoid copying over 
null states later */
-   for (i = 0; i < dcn2_1_soc.num_states + 1; i++) {
-   dcn2_1_soc.clock_limits[i] = dcn2_1_soc.clock_limits[i];
-   }
+   memcpy(&dcn2_1_soc._clock_tmp, &dcn2_1_soc.clock_limits,
+  sizeof(dcn2_1_soc.clock_limits));
 
for (i = 0; i < clk_table->num_entries; i++) {
/* loop backwards*/
@@ -2032,22 +2031,26 @@ void dcn21_update_bw_bounding_box(struct dc *dc, struct 
clk_bw_params *bw_params
if (i == 1)
k++;
 
-   dcn2_1_soc.clock_limits[k].state = k;
-   dcn2_1_soc.clock_limits[k].dcfclk_mhz = 
clk_table->entries[i].dcfclk_mhz;
-   dcn2_1_soc.clock_limits[k].fabricclk_mhz = 
clk_table->entries[i].fclk_mhz;
-   dcn2_1_soc.clock_limits[k].socclk_mhz = 
clk_table->entries[i].socclk_mhz;
-   dcn2_1_soc.clock_limits[k].dram_speed_mts = 
clk_table->entries[i].memclk_mhz * 2;
+   dcn2_1_soc._clock_tmp[k].state = k;
+   dcn2_1_soc._clock_tmp[k].dcfclk_mhz = 
clk_table->entries[i].dcfclk_mhz;
+   dcn2_1_soc._clock_tmp[k].fabricclk_mhz = 
clk_table->entries[i].fclk_mhz;
+   dcn2_1_soc._clock_tmp[k].socclk_mhz = 
clk_table->entries[i].socclk_mhz;
+   dcn2_1_soc._clock_tmp[k].dram_speed_mts = 
clk_table->entries[i].memclk_mhz * 2;
 
-   dcn2_1_soc.clock_limits[k].dispclk_mhz = 
dcn2_1_soc.clock_limits[closest_clk_lvl].dispclk_mhz;
-   dcn2_1_soc.clock_limits[k].dppclk_mhz = 
dcn2_1_soc.clock_limits[closest_clk_lvl].dppclk_mhz;
-   dcn2_1_soc.clock_limits[k].dram_bw_per_chan_gbps = 
dcn2_1_soc.clock_limits[closest_clk_lvl].dram_bw_per_chan_gbps;
-   dcn2_1_soc.clock_limits[k].dscclk_mhz = 
dcn2_1_soc.clock_limits[closest_clk_lvl].dscclk_mhz;
-   dcn2_1_soc.clock_limits[k].dtbclk_mhz = 
dcn2_1_soc.clock_limits[closest_clk_lvl].dtbclk_mhz;
-   dcn2_1_soc.clock_limits[k].phyclk_d18_mhz = 
dcn2_1_soc.clock_limits[closest_clk_lvl].phyclk_d18_mhz;
-   dcn2_1_soc.clock_limits[k].phyclk_mhz = 
dcn2_1_soc.clock_limits[closest_clk_lvl].phyclk_mhz;
+   dcn2_1_soc._clock_tmp[k].dispclk_mhz = 
dcn2_1_soc.clock_limits[closest_clk_lvl].dispclk_mhz;
+   dcn2_1_soc._clock_tmp[k].dppclk_mhz = 
dcn2_1_soc.clock_limits[closest_clk_lvl].dppclk_mhz;
+   dcn2_1_soc._clock_tmp[k].dram_bw_per_chan_gbps = 
dcn2_1_soc.clock_limits[closest_clk_lvl].dram_bw_per_chan_gbps;
+   dcn2_1_soc._clock_tmp[k].dscclk_mhz = 
dcn2_1_soc.clock_limits[closest_clk_lvl].dscclk_mhz;
+   dcn2_1_soc._clock_tmp[k].dtbclk_mhz = 
dcn2_1_soc.clock_limits[closest_clk_lvl].dtbclk_mhz;
+   dcn2_1_soc._clock_tmp[k].phyclk_d18_mhz = 
dcn2_1_soc.clock_limits[closest_clk_lvl].phyclk_d18_mhz;
+   dcn2_1_soc._clock_tmp[k].phyclk_mhz = 
dcn2_1_soc.clock_limits[closest_clk_lvl].phyclk_mhz;
 
k++;
}
+
+   memcpy(&dcn2_1_soc.clock_limits, &dcn2_1_soc._clock_tmp,
+  sizeof(dcn2_1_soc.clock_limits));
+
if (clk_table->num_entries) {
dcn2_1_soc.num_states = clk_table->num_entries + 1;
/* fill in min DF PState */
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.c 
b/drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.c
index 62cf283d9f41..e4863f0bf0f6 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.c
@@ -254,6 +254,9 @@ void dcn301_update_bw_bounding_box(struct dc *dc, struct 
clk_bw_params *bw_param
 
dc_assert_fp_enabled();
 

[PATCH 00/37] DC Patches 17 Oct 2019

2019-10-17 Thread sunpeng.li
From: Leo Li 

This series has dependencies on the recent Renoir series:
https://patchwork.freedesktop.org/series/67803/#rev2

Summary of changes:
* Enable PSR on supported eDP panels
* Allow programming of negative gamma slope


Aidan Yang (2):
  drm/amd/display: Don't use optimized gamma22 with eetf
  drm/amd/display: Allow inverted gamma

Alvin Lee (1):
  drm/amd/display: Update min dcfclk

Anthony Koo (2):
  drm/amd/display: correctly populate dpp refclk in fpga
  drm/amd/display: Proper return of result when aux engine acquire fails

Aric Cyr (2):
  drm/amd/display: 3.2.55
  drm/amd/display: 3.2.56

Dmytro Laktyushkin (8):
  drm/amd/display: remove unused code
  drm/amd/display: split dcn20 fast validate into more functions
  drm/amd/display: correctly initialize dml odm variables
  drm/amd/display: move dispclk vco freq to clk mgr base
  drm/amd/display: remove unnecessary assert
  drm/amd/display: fix number of dcn21 dpm clock levels
  drm/amd/display: add embedded flag to dml
  drm/amd/display: fix avoid_split for dcn2+ validation

Eric Yang (2):
  drm/amd/display: move wm ranges reporting to end of init hw
  drm/amd/display: fix hubbub deadline programing

Jordan Lazare (1):
  drm/amd/display: Remove superfluous assert

Joshua Aberback (1):
  drm/amd/display: Apply vactive dram clock change workaround to dcn2
DMLv2

Jun Lei (4):
  drm/amd/display: add 50us buffer as WA for pstate switch in active
  drm/amd/display: add odm visual confirm
  drm/amd/display: add flag to allow diag to force enumerate edp
  drm/amd/display: do not synchronize "drr" displays

Krunoslav Kovac (1):
  drm/amd/display: Only use EETF when maxCL > max display

Lewis Huang (1):
  drm/amd/display: take signal type from link

Michael Strauss (3):
  drm/amd/display: Fix MPO & pipe split on 3-pipe dcn2x
  drm/amd/display: Passive DP->HDMI dongle detection fix
  drm/amd/display: Disable force_single_disp_pipe_split on DCN2+

Noah Abradjian (1):
  drm/amd/display: Make clk mgr the only dto update point

Paul Hsieh (1):
  drm/amd/display: audio endpoint cannot switch

Reza Amini (1):
  drm/amd/display: Add center mode for integer scaling in DC

Roman Li (2):
  drm/amd/display: Add debugfs entry for reading psr state
  drm/amd/display: Enable PSR

Sung Lee (1):
  drm/amd/display: Do not call update bounding box on dc create

Yogesh Mohan Marimuthu (1):
  drm/amd/display: map TRANSMITTER_UNIPHY_x to LINK_REGS_x

Yongqiang Sun (2):
  drm/amd/display: Add unknown clk state.
  drm/amd/display: enable vm by default for rn.

 .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 133 -
 .../amd/display/amdgpu_dm/amdgpu_dm_debugfs.c |  21 ++
 .../display/amdgpu_dm/amdgpu_dm_mst_types.c   |   1 +
 .../display/dc/clk_mgr/dce100/dce_clk_mgr.c   |  14 +-
 .../dc/clk_mgr/dce112/dce112_clk_mgr.c|   4 +-
 .../display/dc/clk_mgr/dcn10/rv1_clk_mgr.c|  10 +-
 .../display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c  |  38 ++-
 .../amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c | 178 ++--
 .../amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.h |   7 -
 .../dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c   |   2 +-
 drivers/gpu/drm/amd/display/dc/core/dc.c  |   8 +-
 drivers/gpu/drm/amd/display/dc/core/dc_link.c | 149 ++
 .../gpu/drm/amd/display/dc/core/dc_link_ddc.c |  24 +-
 .../gpu/drm/amd/display/dc/core/dc_resource.c |  54 +++-
 drivers/gpu/drm/amd/display/dc/dc.h   |   9 +-
 drivers/gpu/drm/amd/display/dc/dc_ddc_types.h |   3 +-
 drivers/gpu/drm/amd/display/dc/dce/dce_aux.c  |   5 +-
 .../amd/display/dc/dce100/dce100_resource.c   |  37 ++-
 .../display/dc/dce110/dce110_hw_sequencer.c   |  11 +-
 .../amd/display/dc/dce110/dce110_resource.c   |  37 ++-
 .../amd/display/dc/dce112/dce112_resource.c   |  37 ++-
 .../amd/display/dc/dce120/dce120_resource.c   |  37 ++-
 .../drm/amd/display/dc/dce80/dce80_resource.c |  37 ++-
 .../amd/display/dc/dcn10/dcn10_cm_common.c|  22 +-
 .../amd/display/dc/dcn10/dcn10_hw_sequencer.c |   4 +
 .../drm/amd/display/dc/dcn10/dcn10_resource.c |  28 +-
 .../drm/amd/display/dc/dcn20/dcn20_hwseq.c|  33 ++-
 .../drm/amd/display/dc/dcn20/dcn20_hwseq.h|   4 +-
 .../drm/amd/display/dc/dcn20/dcn20_resource.c | 260 +++---
 .../drm/amd/display/dc/dcn20/dcn20_resource.h |  31 +++
 .../drm/amd/display/dc/dcn21/dcn21_resource.c |  78 +-
 drivers/gpu/drm/amd/display/dc/dm_pp_smu.h|   4 +-
 .../dc/dml/dcn20/display_mode_vba_20.c|   3 +-
 .../dc/dml/dcn20/display_mode_vba_20v2.c  |   3 +-
 .../amd/display/dc/dml/display_mode_structs.h |   1 +
 .../drm/amd/display/dc/dml/display_mode_vba.c |   3 +
 .../drm/amd/display/dc/dml/display_mode_vba.h |   1 +
 .../gpu/drm/amd/display/dc/inc/core_types.h   |   4 -
 .../gpu/drm/amd/display/dc/inc/hw/clk_mgr.h   |   3 +-
 .../amd/display/dc/inc/hw/clk_mgr_internal.h  |   2 -
 .../amd/display/modules/color/color_gamma.c   |  51 +++-
 41 files changed, 1074 insertions(+), 317 deletions(-)

-- 
2.23.0


[PATCH 01/37] drm/amd/display: add 50us buffer as WA for pstate switch in active

2019-10-17 Thread sunpeng.li
From: Jun Lei 

Signed-off-by: Jun Lei 
Reviewed-by: Aric Cyr 
Acked-by: Leo Li 
---
 drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c 
b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c
index 649883777f62..6c6c486b774a 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c
@@ -2577,7 +2577,8 @@ static void 
dml20_DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPer
mode_lib->vba.MinActiveDRAMClockChangeMargin
+ mode_lib->vba.DRAMClockChangeLatency;
 
-   if (mode_lib->vba.MinActiveDRAMClockChangeMargin > 0) {
+   if (mode_lib->vba.MinActiveDRAMClockChangeMargin > 50) {
+   mode_lib->vba.DRAMClockChangeWatermark += 25;
mode_lib->vba.DRAMClockChangeSupport[0][0] = 
dm_dram_clock_change_vactive;
} else {
if (mode_lib->vba.SynchronizedVBlank || 
mode_lib->vba.NumberOfActivePlanes == 1) {
-- 
2.23.0


[PATCH 02/37] drm/amd/display: add odm visual confirm

2019-10-17 Thread sunpeng.li
From: Jun Lei 

[why]
Hard to determine if pipe combine is done with MPC or ODM

[how]
Add new visual confirm type, this will mark each MPCC tree
with a different color

Signed-off-by: Jun Lei 
Reviewed-by: Yongqiang Sun 
Acked-by: Leo Li 
---
 drivers/gpu/drm/amd/display/dc/dc.h   |  1 +
 .../drm/amd/display/dc/dcn20/dcn20_hwseq.c| 25 +++
 .../drm/amd/display/dc/dcn20/dcn20_hwseq.h|  4 ++-
 3 files changed, 29 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dc.h 
b/drivers/gpu/drm/amd/display/dc/dc.h
index 5967106826ca..b7e7181bad78 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -229,6 +229,7 @@ enum visual_confirm {
VISUAL_CONFIRM_DISABLE = 0,
VISUAL_CONFIRM_SURFACE = 1,
VISUAL_CONFIRM_HDR = 2,
+   VISUAL_CONFIRM_MPCTREE = 4,
 };
 
 enum dcc_option {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c 
b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
index 6229a8ca0013..e237ec39d193 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
@@ -1996,6 +1996,28 @@ static void dcn20_reset_hw_ctx_wrap(
}
 }
 
+void dcn20_get_mpctree_visual_confirm_color(
+   struct pipe_ctx *pipe_ctx,
+   struct tg_color *color)
+{
+   const struct tg_color pipe_colors[6] = {
+   {MAX_TG_COLOR_VALUE, 0, 0}, // red
+   {MAX_TG_COLOR_VALUE, 0, MAX_TG_COLOR_VALUE}, // yellow
+   {0, MAX_TG_COLOR_VALUE, 0}, // blue
+   {MAX_TG_COLOR_VALUE / 2, 0, MAX_TG_COLOR_VALUE / 2}, // 
purple
+   {0, 0, MAX_TG_COLOR_VALUE}, // green
+   {MAX_TG_COLOR_VALUE, MAX_TG_COLOR_VALUE * 2 / 3, 0}, // 
orange
+   };
+
+   struct pipe_ctx *top_pipe = pipe_ctx;
+
+   while (top_pipe->top_pipe) {
+   top_pipe = top_pipe->top_pipe;
+   }
+
+   *color = pipe_colors[top_pipe->pipe_idx];
+}
+
 static void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
 {
struct hubp *hubp = pipe_ctx->plane_res.hubp;
@@ -2013,6 +2035,9 @@ static void dcn20_update_mpcc(struct dc *dc, struct 
pipe_ctx *pipe_ctx)
} else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SURFACE) {
dcn10_get_surface_visual_confirm_color(
pipe_ctx, &blnd_cfg.black_color);
+   } else if (dc->debug.visual_confirm == VISUAL_CONFIRM_MPCTREE) {
+   dcn20_get_mpctree_visual_confirm_color(
+   pipe_ctx, &blnd_cfg.black_color);
}
 
if (per_pixel_alpha)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h 
b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h
index 9dbc2effa4ea..3098f1049ed7 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h
@@ -109,5 +109,7 @@ bool dcn20_set_blend_lut(
struct pipe_ctx *pipe_ctx, const struct dc_plane_state *plane_state);
 bool dcn20_set_shaper_3dlut(
struct pipe_ctx *pipe_ctx, const struct dc_plane_state *plane_state);
-
+void dcn20_get_mpctree_visual_confirm_color(
+   struct pipe_ctx *pipe_ctx,
+   struct tg_color *color);
 #endif /* __DC_HWSS_DCN20_H__ */
-- 
2.23.0


[PATCH 04/37] drm/amd/display: Don't use optimized gamma22 with eetf

2019-10-17 Thread sunpeng.li
From: Aidan Yang 

[why]
The optimized gamma22 path assumes a fixed point distribution, which
does not hold when EETF is used.

[how]
Use the long-form calculation for EETF.

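For reference, the piecewise function evaluated by the new
translate_from_linear_space_long() helper, reconstructed from the diff below
(summary only, not part of the patch; a0..a3 and gamma are the fixed-point
arguments in the code):

  f(x) = 1                               for x > 1
  f(x) = a2 - (1 + a3) * (-x)^(1/gamma)  for x <= -a0
  f(x) = (1 + a3) * x^(1/gamma) - a2     for x >= a0
  f(x) = a1 * x                          otherwise
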
Signed-off-by: Aidan Yang 
Reviewed-by: Krunoslav Kovac 
Acked-by: Leo Li 
Acked-by: Reza Amini 
---
 .../amd/display/modules/color/color_gamma.c   | 45 +--
 1 file changed, 41 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c 
b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
index 2d8f14b69117..85dad356c9d5 100644
--- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
+++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
@@ -373,7 +373,42 @@ static struct fixed31_32 translate_from_linear_space(
return dc_fixpt_mul(args->arg, args->a1);
 }
 
-static struct fixed31_32 calculate_gamma22(struct fixed31_32 arg)
+
+static struct fixed31_32 translate_from_linear_space_long(
+   struct translate_from_linear_space_args *args)
+{
+   const struct fixed31_32 one = dc_fixpt_from_int(1);
+
+   if (dc_fixpt_lt(one, args->arg))
+   return one;
+
+   if (dc_fixpt_le(args->arg, dc_fixpt_neg(args->a0)))
+   return dc_fixpt_sub(
+   args->a2,
+   dc_fixpt_mul(
+   dc_fixpt_add(
+   one,
+   args->a3),
+   dc_fixpt_pow(
+   dc_fixpt_neg(args->arg),
+   dc_fixpt_recip(args->gamma;
+   else if (dc_fixpt_le(args->a0, args->arg))
+   return dc_fixpt_sub(
+   dc_fixpt_mul(
+   dc_fixpt_add(
+   one,
+   args->a3),
+   dc_fixpt_pow(
+   args->arg,
+   dc_fixpt_recip(args->gamma))),
+   args->a2);
+   else
+   return dc_fixpt_mul(
+   args->arg,
+   args->a1);
+}
+
+static struct fixed31_32 calculate_gamma22(struct fixed31_32 arg, bool 
use_eetf)
 {
struct fixed31_32 gamma = dc_fixpt_from_fraction(22, 10);
 
@@ -384,9 +419,13 @@ static struct fixed31_32 calculate_gamma22(struct 
fixed31_32 arg)
scratch_gamma_args.a3 = dc_fixpt_zero;
scratch_gamma_args.gamma = gamma;
 
+   if (use_eetf)
+   return translate_from_linear_space_long(&scratch_gamma_args);
+
return translate_from_linear_space(&scratch_gamma_args);
 }
 
+
 static struct fixed31_32 translate_to_linear_space(
struct fixed31_32 arg,
struct fixed31_32 a0,
@@ -950,7 +989,7 @@ static bool build_freesync_hdr(struct pwl_float_data_ex 
*rgb_regamma,
if (dc_fixpt_lt(scaledX, dc_fixpt_zero))
output = dc_fixpt_zero;
else
-   output = calculate_gamma22(scaledX);
+   output = calculate_gamma22(scaledX, 
use_eetf);
 
rgb->r = output;
rgb->g = output;
@@ -2173,5 +2212,3 @@ bool  mod_color_calculate_degamma_curve(enum 
dc_transfer_func_predefined trans,
 rgb_degamma_alloc_fail:
return ret;
 }
-
-
-- 
2.23.0


[PATCH 13/37] drm/amd/display: move dispclk vco freq to clk mgr base

2019-10-17 Thread sunpeng.li
From: Dmytro Laktyushkin 

This value will be needed by dml and therefore should be externally
accessible.

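A minimal sketch of the intent (the helper name is hypothetical): with the
field moved into struct clk_mgr, code outside the clk mgr internals, such as
dml setup, can read it without casting to clk_mgr_internal.

static int example_get_dentist_vco_khz(struct clk_mgr *clk_mgr)
{
        /* dentist_vco_freq_khz now lives in the shared base struct */
        return clk_mgr->dentist_vco_freq_khz;
}
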
Signed-off-by: Dmytro Laktyushkin 
Reviewed-by: Nevenko Stupar 
Acked-by: Leo Li 
---
 .../amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c| 14 +++---
 .../amd/display/dc/clk_mgr/dce112/dce112_clk_mgr.c |  4 ++--
 .../drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c | 10 +-
 .../amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c   | 14 +++---
 .../drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c  | 12 ++--
 .../drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.h  |  7 ---
 .../gpu/drm/amd/display/dc/dcn20/dcn20_resource.c  | 12 ++--
 .../gpu/drm/amd/display/dc/dcn20/dcn20_resource.h  |  6 ++
 drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h|  2 +-
 .../drm/amd/display/dc/inc/hw/clk_mgr_internal.h   |  2 --
 10 files changed, 40 insertions(+), 43 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c 
b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c
index c5c8c4901eed..26db1c5d4e4d 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c
@@ -147,7 +147,7 @@ int dce_get_dp_ref_freq_khz(struct clk_mgr *clk_mgr_base)
 
/* Calculate the current DFS clock, in kHz.*/
dp_ref_clk_khz = (DENTIST_DIVIDER_RANGE_SCALE_FACTOR
-   * clk_mgr->dentist_vco_freq_khz) / target_div;
+   * clk_mgr->base.dentist_vco_freq_khz) / target_div;
 
return dce_adjust_dp_ref_freq_for_ss(clk_mgr, dp_ref_clk_khz);
 }
@@ -239,7 +239,7 @@ int dce_set_clock(
/* Make sure requested clock isn't lower than minimum threshold*/
if (requested_clk_khz > 0)
requested_clk_khz = max(requested_clk_khz,
-   clk_mgr_dce->dentist_vco_freq_khz / 64);
+   clk_mgr_dce->base.dentist_vco_freq_khz / 64);
 
/* Prepare to program display clock*/
pxl_clk_params.target_pixel_clock_100hz = requested_clk_khz * 10;
@@ -276,11 +276,11 @@ static void dce_clock_read_integrated_info(struct 
clk_mgr_internal *clk_mgr_dce)
int i;
 
if (bp->integrated_info)
-   clk_mgr_dce->dentist_vco_freq_khz = 
bp->integrated_info->dentist_vco_freq;
-   if (clk_mgr_dce->dentist_vco_freq_khz == 0) {
-   clk_mgr_dce->dentist_vco_freq_khz = 
bp->fw_info.smu_gpu_pll_output_freq;
-   if (clk_mgr_dce->dentist_vco_freq_khz == 0)
-   clk_mgr_dce->dentist_vco_freq_khz = 360;
+   clk_mgr_dce->base.dentist_vco_freq_khz = 
bp->integrated_info->dentist_vco_freq;
+   if (clk_mgr_dce->base.dentist_vco_freq_khz == 0) {
+   clk_mgr_dce->base.dentist_vco_freq_khz = 
bp->fw_info.smu_gpu_pll_output_freq;
+   if (clk_mgr_dce->base.dentist_vco_freq_khz == 0)
+   clk_mgr_dce->base.dentist_vco_freq_khz = 360;
}
 
/*update the maximum display clock for each power state*/
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce112/dce112_clk_mgr.c 
b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce112/dce112_clk_mgr.c
index 7c746ef1e32e..a6c46e903ff9 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce112/dce112_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce112/dce112_clk_mgr.c
@@ -81,7 +81,7 @@ int dce112_set_clock(struct clk_mgr *clk_mgr_base, int 
requested_clk_khz)
/* Make sure requested clock isn't lower than minimum threshold*/
if (requested_clk_khz > 0)
requested_clk_khz = max(requested_clk_khz,
-   clk_mgr_dce->dentist_vco_freq_khz / 62);
+   clk_mgr_dce->base.dentist_vco_freq_khz / 62);
 
dce_clk_params.target_clock_frequency = requested_clk_khz;
dce_clk_params.pll_id = CLOCK_SOURCE_ID_DFS;
@@ -135,7 +135,7 @@ int dce112_set_dispclk(struct clk_mgr_internal *clk_mgr, 
int requested_clk_khz)
/* Make sure requested clock isn't lower than minimum threshold*/
if (requested_clk_khz > 0)
requested_clk_khz = max(requested_clk_khz,
-   clk_mgr->dentist_vco_freq_khz / 62);
+   clk_mgr->base.dentist_vco_freq_khz / 62);
 
dce_clk_params.target_clock_frequency = requested_clk_khz;
dce_clk_params.pll_id = CLOCK_SOURCE_ID_DFS;
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c 
b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c
index 5b3d36d41822..3fab9296918a 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c
@@ -269,11 +269,11 @@ void rv1_clk_mgr_construct(struct dc_context *ctx, struct 
clk_mgr_internal *clk_
clk_mgr->base.dprefclk_khz = 60;
 
if (bp->integrated_info)
-   clk_mgr->dentist_vc

[PATCH 05/37] drm/amd/display: Remove superfluous assert

2019-10-17 Thread sunpeng.li
From: Jordan Lazare 

[Why]
The for loop below the assert already checks the number of instances to
create. The ASSERT is meaningless and causes spam.

[How]
Remove the assert.

Signed-off-by: Jordan Lazare 
Reviewed-by: Harry Wentland 
Acked-by: Leo Li 
---
 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c | 2 --
 1 file changed, 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 
b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
index ee9157b673ab..c9792c47978a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
@@ -2913,8 +2913,6 @@ bool dcn20_dwbc_create(struct dc_context *ctx, struct 
resource_pool *pool)
int i;
uint32_t pipe_count = pool->res_cap->num_dwb;
 
-   ASSERT(pipe_count > 0);
-
for (i = 0; i < pipe_count; i++) {
struct dcn20_dwbc *dwbc20 = kzalloc(sizeof(struct dcn20_dwbc),
GFP_KERNEL);
-- 
2.23.0

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

[PATCH 08/37] drm/amd/display: Add debugfs entry for reading psr state

2019-10-17 Thread sunpeng.li
From: Roman Li 

[Why]
For upcoming PSR support it's useful to have a debug entry
to verify the PSR state.

[How]
 - Enable the PSR DC API for Linux
 - Add a psr_state file to the eDP connector debugfs
   Usage e.g.: cat /sys/kernel/debug/dri/0/DP-1/psr_state

Signed-off-by: Roman Li 
Reviewed-by: Nicholas Kazlauskas 
Acked-by: Leo Li 
---
 .../amd/display/amdgpu_dm/amdgpu_dm_debugfs.c |  21 +++
 drivers/gpu/drm/amd/display/dc/core/dc_link.c | 149 ++
 2 files changed, 170 insertions(+)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
index e29c6314f98c..bdb37e611015 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
@@ -969,6 +969,25 @@ static int force_yuv420_output_get(void *data, u64 *val)
 DEFINE_DEBUGFS_ATTRIBUTE(force_yuv420_output_fops, force_yuv420_output_get,
 force_yuv420_output_set, "%llu\n");
 
+/*
+ *  Read PSR state
+ */
+static int psr_get(void *data, u64 *val)
+{
+   struct amdgpu_dm_connector *connector = data;
+   struct dc_link *link = connector->dc_link;
+   uint32_t psr_state = 0;
+
+   dc_link_get_psr_state(link, &psr_state);
+
+   *val = psr_state;
+
+   return 0;
+}
+
+
+DEFINE_DEBUGFS_ATTRIBUTE(psr_fops, psr_get, NULL, "%llu\n");
+
 void connector_debugfs_init(struct amdgpu_dm_connector *connector)
 {
int i;
@@ -982,6 +1001,8 @@ void connector_debugfs_init(struct amdgpu_dm_connector 
*connector)
dp_debugfs_entries[i].fops);
}
}
+   if (connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)
+   debugfs_create_file_unsafe("psr_state", 0444, dir, connector, 
&psr_fops);
 
debugfs_create_file_unsafe("force_yuv420_output", 0644, dir, connector,
   &force_yuv420_output_fops);
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c 
b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index 935053664160..10a04565535c 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -2436,6 +2436,155 @@ bool dc_link_set_psr_allow_active(struct dc_link *link, 
bool allow_active, bool
return true;
 }
 
+bool dc_link_get_psr_state(const struct dc_link *link, uint32_t *psr_state)
+{
+   struct dc  *core_dc = link->ctx->dc;
+   struct dmcu *dmcu = core_dc->res_pool->dmcu;
+
+   if (dmcu != NULL && link->psr_feature_enabled)
+   dmcu->funcs->get_psr_state(dmcu, psr_state);
+
+   return true;
+}
+
+bool dc_link_setup_psr(struct dc_link *link,
+   const struct dc_stream_state *stream, struct psr_config 
*psr_config,
+   struct psr_context *psr_context)
+{
+   struct dc *core_dc;
+   struct dmcu *dmcu;
+   int i;
+   /* updateSinkPsrDpcdConfig*/
+   union dpcd_psr_configuration psr_configuration;
+
+   psr_context->controllerId = CONTROLLER_ID_UNDEFINED;
+
+   if (!link)
+   return false;
+
+   core_dc = link->ctx->dc;
+   dmcu = core_dc->res_pool->dmcu;
+
+   if (!dmcu)
+   return false;
+
+
+   memset(&psr_configuration, 0, sizeof(psr_configuration));
+
+   psr_configuration.bits.ENABLE= 1;
+   psr_configuration.bits.CRC_VERIFICATION  = 1;
+   psr_configuration.bits.FRAME_CAPTURE_INDICATION  =
+   psr_config->psr_frame_capture_indication_req;
+
+   /* Check for PSR v2*/
+   if (psr_config->psr_version == 0x2) {
+   /* For PSR v2 selective update.
+* Indicates whether sink should start capturing
+* immediately following active scan line,
+* or starting with the 2nd active scan line.
+*/
+   psr_configuration.bits.LINE_CAPTURE_INDICATION = 0;
+   /*For PSR v2, determines whether Sink should generate
+* IRQ_HPD when CRC mismatch is detected.
+*/
+   psr_configuration.bits.IRQ_HPD_WITH_CRC_ERROR= 1;
+   }
+
+   dm_helpers_dp_write_dpcd(
+   link->ctx,
+   link,
+   368,
+   &psr_configuration.raw,
+   sizeof(psr_configuration.raw));
+
+   psr_context->channel = link->ddc->ddc_pin->hw_info.ddc_channel;
+   psr_context->transmitterId = link->link_enc->transmitter;
+   psr_context->engineId = link->link_enc->preferred_engine;
+
+   for (i = 0; i < MAX_PIPES; i++) {
+   if (core_dc->current_state->res_ctx.pipe_ctx[i].stream
+   == stream) {
+   /* dmcu -1 for all controller id values,
+* therefore +1 here
+*/
+   psr_context->controllerId =
+  

[PATCH 18/37] drm/amd/display: Allow inverted gamma

2019-10-17 Thread sunpeng.li
From: Aidan Yang 

[why]
There's a use case for inverted gamma
and it's been confirmed that negative slopes are ok.

[how]
Remove code for blocking non-monotonically increasing gamma

Signed-off-by: Aidan Yang 
Reviewed-by: Krunoslav Kovac 
Acked-by: Leo Li 
Acked-by: Reza Amini 
---
 .../amd/display/dc/dcn10/dcn10_cm_common.c| 22 +++
 1 file changed, 8 insertions(+), 14 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c 
b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c
index 01c7e30b9ce1..bbd6e01b3eca 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c
@@ -393,6 +393,10 @@ bool cm_helper_translate_curve_to_hw_format(
rgb_resulted[hw_points - 1].green = 
output_tf->tf_pts.green[start_index];
rgb_resulted[hw_points - 1].blue = output_tf->tf_pts.blue[start_index];
 
+   rgb_resulted[hw_points].red = rgb_resulted[hw_points - 1].red;
+   rgb_resulted[hw_points].green = rgb_resulted[hw_points - 1].green;
+   rgb_resulted[hw_points].blue = rgb_resulted[hw_points - 1].blue;
+
// All 3 color channels have same x
corner_points[0].red.x = dc_fixpt_pow(dc_fixpt_from_int(2),
 dc_fixpt_from_int(region_start));
@@ -464,13 +468,6 @@ bool cm_helper_translate_curve_to_hw_format(
 
i = 1;
while (i != hw_points + 1) {
-   if (dc_fixpt_lt(rgb_plus_1->red, rgb->red))
-   rgb_plus_1->red = rgb->red;
-   if (dc_fixpt_lt(rgb_plus_1->green, rgb->green))
-   rgb_plus_1->green = rgb->green;
-   if (dc_fixpt_lt(rgb_plus_1->blue, rgb->blue))
-   rgb_plus_1->blue = rgb->blue;
-
rgb->delta_red   = dc_fixpt_sub(rgb_plus_1->red,   rgb->red);
rgb->delta_green = dc_fixpt_sub(rgb_plus_1->green, rgb->green);
rgb->delta_blue  = dc_fixpt_sub(rgb_plus_1->blue,  rgb->blue);
@@ -562,6 +559,10 @@ bool cm_helper_translate_curve_to_degamma_hw_format(
rgb_resulted[hw_points - 1].green = 
output_tf->tf_pts.green[start_index];
rgb_resulted[hw_points - 1].blue = output_tf->tf_pts.blue[start_index];
 
+   rgb_resulted[hw_points].red = rgb_resulted[hw_points - 1].red;
+   rgb_resulted[hw_points].green = rgb_resulted[hw_points - 1].green;
+   rgb_resulted[hw_points].blue = rgb_resulted[hw_points - 1].blue;
+
corner_points[0].red.x = dc_fixpt_pow(dc_fixpt_from_int(2),
 dc_fixpt_from_int(region_start));
corner_points[0].green.x = corner_points[0].red.x;
@@ -624,13 +625,6 @@ bool cm_helper_translate_curve_to_degamma_hw_format(
 
i = 1;
while (i != hw_points + 1) {
-   if (dc_fixpt_lt(rgb_plus_1->red, rgb->red))
-   rgb_plus_1->red = rgb->red;
-   if (dc_fixpt_lt(rgb_plus_1->green, rgb->green))
-   rgb_plus_1->green = rgb->green;
-   if (dc_fixpt_lt(rgb_plus_1->blue, rgb->blue))
-   rgb_plus_1->blue = rgb->blue;
-
rgb->delta_red   = dc_fixpt_sub(rgb_plus_1->red,   rgb->red);
rgb->delta_green = dc_fixpt_sub(rgb_plus_1->green, rgb->green);
rgb->delta_blue  = dc_fixpt_sub(rgb_plus_1->blue,  rgb->blue);
-- 
2.23.0


[PATCH 03/37] drm/amd/display: Add unknown clk state.

2019-10-17 Thread sunpeng.li
From: Yongqiang Sun 

[Why]
The system hangs during S0i3 when only DP is connected, because the clock
is disabled while link training is performed.
During S0i3 the clock is disabled, but the clock state is updated when
init_hw is called; at that point the clock is still disabled, which leaves
a wrong state for the next attempt to enable the clock.

[How]
Add an unknown state and initialize to it during init_hw, making sure the
enable-clock command is sent to the SMU.

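A sketch of the resulting behavior (illustrative, not a literal hunk from this
patch): because rn_init_clocks() now leaves pwr_state at DCN_PWR_STATE_UNKNOWN,
the first update after init_hw mismatches both known states, so the enable
command is always sent to the SMU.

static void example_first_update_after_init(struct clk_mgr *clk_mgr_base,
                struct clk_mgr_internal *clk_mgr)
{
        /* UNKNOWN != MISSION_MODE, so the SMU message is not skipped */
        if (clk_mgr_base->clks.pwr_state != DCN_PWR_STATE_MISSION_MODE) {
                rn_vbios_smu_set_dcn_low_power_state(clk_mgr,
                                DCN_PWR_STATE_MISSION_MODE);
                clk_mgr_base->clks.pwr_state = DCN_PWR_STATE_MISSION_MODE;
        }
}
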
Signed-off-by: Yongqiang Sun 
Reviewed-by: Eric Yang 
Acked-by: Leo Li 
---
 .../amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c| 16 
 .../dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c  |  2 +-
 drivers/gpu/drm/amd/display/dc/dc.h  |  5 +++--
 3 files changed, 12 insertions(+), 11 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c 
b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
index b647e0320e4b..6212b407cd01 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
@@ -114,22 +114,22 @@ void rn_update_clocks(struct clk_mgr *clk_mgr_base,
 */
if (safe_to_lower) {
/* check that we're not already in lower */
-   if (clk_mgr_base->clks.pwr_state != DCN_PWR_STATE_OPTIMIZED) {
+   if (clk_mgr_base->clks.pwr_state != DCN_PWR_STATE_LOW_POWER) {
 
display_count = rn_get_active_display_cnt_wa(dc, 
context);
/* if we can go lower, go lower */
if (display_count == 0) {
-   rn_vbios_smu_set_dcn_low_power_state(clk_mgr, 
DCN_PWR_STATE_OPTIMIZED);
+   rn_vbios_smu_set_dcn_low_power_state(clk_mgr, 
DCN_PWR_STATE_LOW_POWER);
/* update power state */
-   clk_mgr_base->clks.pwr_state = 
DCN_PWR_STATE_OPTIMIZED;
+   clk_mgr_base->clks.pwr_state = 
DCN_PWR_STATE_LOW_POWER;
}
}
} else {
-   /* check that we're not already in the normal state */
-   if (clk_mgr_base->clks.pwr_state != DCN_PWR_STATE_NORMAL) {
-   rn_vbios_smu_set_dcn_low_power_state(clk_mgr, 
DCN_PWR_STATE_NORMAL);
+   /* check that we're not already in D0 */
+   if (clk_mgr_base->clks.pwr_state != DCN_PWR_STATE_MISSION_MODE) 
{
+   rn_vbios_smu_set_dcn_low_power_state(clk_mgr, 
DCN_PWR_STATE_MISSION_MODE);
/* update power state */
-   clk_mgr_base->clks.pwr_state = DCN_PWR_STATE_NORMAL;
+   clk_mgr_base->clks.pwr_state = 
DCN_PWR_STATE_MISSION_MODE;
}
}
 
@@ -393,7 +393,7 @@ void rn_init_clocks(struct clk_mgr *clk_mgr)
// Assumption is that boot state always supports pstate
clk_mgr->clks.p_state_change_support = true;
clk_mgr->clks.prev_p_state_change_support = true;
-   clk_mgr->clks.pwr_state = DCN_PWR_STATE_NORMAL;
+   clk_mgr->clks.pwr_state = DCN_PWR_STATE_UNKNOWN;
 }
 
 static struct clk_mgr_funcs dcn21_funcs = {
diff --git 
a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c 
b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c
index 5647fcf10717..cb7c0e8b7e1b 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c
@@ -170,7 +170,7 @@ void rn_vbios_smu_set_dcn_low_power_state(struct 
clk_mgr_internal *clk_mgr, enum
 {
int disp_count;
 
-   if (state == DCN_PWR_STATE_OPTIMIZED)
+   if (state == DCN_PWR_STATE_LOW_POWER)
disp_count = 0;
else
disp_count = 1;
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h 
b/drivers/gpu/drm/amd/display/dc/dc.h
index b7e7181bad78..2e1d34882684 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -257,8 +257,9 @@ enum dtm_pstate{
 };
 
 enum dcn_pwr_state {
-   DCN_PWR_STATE_OPTIMIZED = 0,
-   DCN_PWR_STATE_NORMAL = 1
+   DCN_PWR_STATE_UNKNOWN = -1,
+   DCN_PWR_STATE_MISSION_MODE = 0,
+   DCN_PWR_STATE_LOW_POWER = 3,
 };
 
 /*
-- 
2.23.0


[PATCH 10/37] drm/amd/display: correctly populate dpp refclk in fpga

2019-10-17 Thread sunpeng.li
From: Anthony Koo 

[Why]
In the diags environment we are not programming the DPP DTO
correctly.

[How]
Populate the dpp refclk in dccg so it can be used to correctly
program the DPP DTO.

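For context, a sketch of why the refclk value matters; the DTO programming
itself is outside this diff, so the phase/modulo relation below is an
assumption about how the divider is derived.

/* Illustrative only: output dppclk = ref_dppclk * phase / modulo */
static void example_compute_dpp_dto(int ref_dppclk_khz, int req_dppclk_khz,
                int *phase, int *modulo)
{
        *phase = req_dppclk_khz;
        *modulo = ref_dppclk_khz;
}
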
Signed-off-by: Anthony Koo 
Reviewed-by: Tony Cheng 
Acked-by: Leo Li 
---
 .../drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c   | 10 --
 1 file changed, 8 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c 
b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
index ecd2cb4840e3..69daddbfbf29 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
@@ -260,6 +260,8 @@ void dcn2_update_clocks_fpga(struct clk_mgr *clk_mgr,
struct dc_state *context,
bool safe_to_lower)
 {
+   struct clk_mgr_internal *clk_mgr_int = TO_CLK_MGR_INTERNAL(clk_mgr);
+
struct dc_clocks *new_clocks = &context->bw_ctx.bw.dcn.clk;
/* Min fclk = 1.2GHz since all the extra scemi logic seems to run off 
of it */
int fclk_adj = new_clocks->fclk_khz > 120 ? new_clocks->fclk_khz : 
120;
@@ -297,14 +299,18 @@ void dcn2_update_clocks_fpga(struct clk_mgr *clk_mgr,
clk_mgr->clks.dispclk_khz = new_clocks->dispclk_khz;
}
 
-   /* Both fclk and dppclk ref are run on the same scemi clock so we
-* need to keep the same value for both
+   /* Both fclk and ref_dppclk run on the same scemi clock.
+* So take the higher value since the DPP DTO is typically programmed
+* such that max dppclk is 1:1 with ref_dppclk.
 */
if (clk_mgr->clks.fclk_khz > clk_mgr->clks.dppclk_khz)
clk_mgr->clks.dppclk_khz = clk_mgr->clks.fclk_khz;
if (clk_mgr->clks.dppclk_khz > clk_mgr->clks.fclk_khz)
clk_mgr->clks.fclk_khz = clk_mgr->clks.dppclk_khz;
 
+   // Both fclk and ref_dppclk run on the same scemi clock.
+   clk_mgr_int->dccg->ref_dppclk = clk_mgr->clks.fclk_khz;
+
dm_set_dcn_clocks(clk_mgr->ctx, &clk_mgr->clks);
 }
 
-- 
2.23.0


[PATCH 24/37] drm/amd/display: Passive DP->HDMI dongle detection fix

2019-10-17 Thread sunpeng.li
From: Michael Strauss 

[WHY]
i2c_read is called to differentiate passive DP->HDMI and DP->DVI-D dongles.
The call is expected to fail in the DVI-D case but pass in the HDMI case.
Some HDMI dongles have a chance to fail as well, causing misdetection as DVI-D.

[HOW]
Retry i2c_read to ensure a failed result is valid.

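A minimal sketch of the retry pattern (the helper name is hypothetical;
i2c_read() and struct ddc_service come from the file being patched): the
dongle is only classified as DVI-D if the read still fails after the retries.

static bool example_i2c_read_with_retry(struct ddc_service *ddc,
                uint32_t address, uint8_t *buf, uint32_t len)
{
        int retries = 2;

        while (retries--) {
                if (i2c_read(ddc, address, buf, len))
                        return true;    /* read succeeded: treat as HDMI */
        }
        return false;                   /* still failing: treat as DVI-D */
}
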
Signed-off-by: Michael Strauss 
Reviewed-by: Tony Cheng 
Acked-by: Leo Li 
---
 .../gpu/drm/amd/display/dc/core/dc_link_ddc.c | 24 ++-
 1 file changed, 18 insertions(+), 6 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c 
b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
index 9a56f110bbd1..7f904d55c1bc 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
@@ -374,6 +374,7 @@ void dal_ddc_service_i2c_query_dp_dual_mode_adaptor(
enum display_dongle_type *dongle = &sink_cap->dongle_type;
uint8_t type2_dongle_buf[DP_ADAPTOR_TYPE2_SIZE];
bool is_type2_dongle = false;
+   int retry_count = 2;
struct dp_hdmi_dongle_signature_data *dongle_signature;
 
/* Assume we have no valid DP passive dongle connected */
@@ -386,13 +387,24 @@ void dal_ddc_service_i2c_query_dp_dual_mode_adaptor(
DP_HDMI_DONGLE_ADDRESS,
type2_dongle_buf,
sizeof(type2_dongle_buf))) {
-   *dongle = DISPLAY_DONGLE_DP_DVI_DONGLE;
-   sink_cap->max_hdmi_pixel_clock = DP_ADAPTOR_DVI_MAX_TMDS_CLK;
+   /* Passive HDMI dongles can sometimes fail here without 
retrying*/
+   while (retry_count > 0) {
+   if (i2c_read(ddc,
+   DP_HDMI_DONGLE_ADDRESS,
+   type2_dongle_buf,
+   sizeof(type2_dongle_buf)))
+   break;
+   retry_count--;
+   }
+   if (retry_count == 0) {
+   *dongle = DISPLAY_DONGLE_DP_DVI_DONGLE;
+   sink_cap->max_hdmi_pixel_clock = 
DP_ADAPTOR_DVI_MAX_TMDS_CLK;
 
-   CONN_DATA_DETECT(ddc->link, type2_dongle_buf, 
sizeof(type2_dongle_buf),
-   "DP-DVI passive dongle %dMhz: ",
-   DP_ADAPTOR_DVI_MAX_TMDS_CLK / 1000);
-   return;
+   CONN_DATA_DETECT(ddc->link, type2_dongle_buf, 
sizeof(type2_dongle_buf),
+   "DP-DVI passive dongle %dMhz: ",
+   DP_ADAPTOR_DVI_MAX_TMDS_CLK / 1000);
+   return;
+   }
}
 
/* Check if Type 2 dongle.*/
-- 
2.23.0


[PATCH 22/37] drm/amd/display: add flag to allow diag to force enumerate edp

2019-10-17 Thread sunpeng.li
From: Jun Lei 

[why]
SLT tests require that diag can drive eDP even if nothing is connected. This
is not a typical production use case, so a flag is needed.

[how]
Add a flag; this flag supersedes the "should destroy" logic.

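A minimal sketch of how a diag/SLT configuration would use the flag (the call
site is an assumption; the field itself is added by this patch):

static void example_enable_forced_edp_enumeration(struct dc *dc)
{
        /* keep the eDP link even when no sink is detected */
        dc->config.force_enum_edp = true;
}
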
Signed-off-by: Jun Lei 
Reviewed-by: Aric Cyr 
Acked-by: Leo Li 
---
 drivers/gpu/drm/amd/display/dc/core/dc.c | 2 +-
 drivers/gpu/drm/amd/display/dc/dc.h  | 1 +
 2 files changed, 2 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c 
b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 55b82ca44c3b..5e487bb82861 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -194,7 +194,7 @@ static bool create_links(
}
}
 
-   if (!should_destory_link) {
+   if (dc->config.force_enum_edp || !should_destory_link) {
dc->links[dc->link_count] = link;
link->dc = dc;
++dc->link_count;
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h 
b/drivers/gpu/drm/amd/display/dc/dc.h
index a86dad3808b6..b578b2148e45 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -220,6 +220,7 @@ struct dc_config {
bool allow_seamless_boot_optimization;
bool power_down_display_on_boot;
bool edp_not_connected;
+   bool force_enum_edp;
bool forced_clocks;
bool disable_extended_timeout_support; // Used to disable extended 
timeout and lttpr feature as well
bool multi_mon_pp_mclk_switch;
-- 
2.23.0


[PATCH 25/37] drm/amd/display: Disable force_single_disp_pipe_split on DCN2+

2019-10-17 Thread sunpeng.li
From: Michael Strauss 

[WHY]
force_single_disp_pipe_split is a debug flag for use on DCN1
but isn't necessary otherwise as DCN2+ splits by default

Signed-off-by: Michael Strauss 
Reviewed-by: Tony Cheng 
Acked-by: Charlene Liu 
Acked-by: Leo Li 
---
 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c | 2 +-
 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 
b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
index 88a938633d11..a1b2db8f687a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
@@ -863,7 +863,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.clock_trace = true,
.disable_pplib_clock_request = true,
.pipe_split_policy = MPC_SPLIT_DYNAMIC,
-   .force_single_disp_pipe_split = true,
+   .force_single_disp_pipe_split = false,
.disable_dcc = DCC_ENABLE,
.vsr_support = true,
.performance_trace = false,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c 
b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
index e07f03368c97..f165f7e58da9 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
@@ -836,7 +836,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.clock_trace = true,
.disable_pplib_clock_request = true,
.pipe_split_policy = MPC_SPLIT_AVOID_MULT_DISP,
-   .force_single_disp_pipe_split = true,
+   .force_single_disp_pipe_split = false,
.disable_dcc = DCC_ENABLE,
.vsr_support = true,
.performance_trace = false,
-- 
2.23.0


[PATCH 19/37] drm/amd/display: enable vm by default for rn.

2019-10-17 Thread sunpeng.li
From: Yongqiang Sun 

[Why & How]
vm should be enabled by default for rn so that
dml uses the right parameters.

Signed-off-by: Yongqiang Sun 
Reviewed-by: Dmytro Laktyushkin 
Reviewed-by: Tony Cheng 
Acked-by: Leo Li 
---
 .../drm/amd/display/dc/dcn21/dcn21_resource.c | 29 ---
 1 file changed, 25 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c 
b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
index 5e3b48bb04f1..a4d9ed9f2623 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
@@ -83,8 +83,8 @@
 
 struct _vcs_dpi_ip_params_st dcn2_1_ip = {
.odm_capable = 1,
-   .gpuvm_enable = 0,
-   .hostvm_enable = 0,
+   .gpuvm_enable = 1,
+   .hostvm_enable = 1,
.gpuvm_max_page_table_levels = 1,
.hostvm_max_page_table_levels = 4,
.hostvm_cached_page_table_levels = 2,
@@ -669,6 +669,9 @@ static const struct dcn10_stream_encoder_mask se_mask = {
 
 static void dcn21_pp_smu_destroy(struct pp_smu_funcs **pp_smu);
 
+static int dcn21_populate_dml_pipes_from_context(
+   struct dc *dc, struct resource_context *res_ctx, 
display_e2e_pipe_params_st *pipes);
+
 static struct input_pixel_processor *dcn21_ipp_create(
struct dc_context *ctx, uint32_t inst)
 {
@@ -1083,7 +1086,7 @@ void dcn21_calculate_wm(
pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc,
&context->res_ctx, pipes);
else
-   pipe_cnt = dcn20_populate_dml_pipes_from_context(dc,
+   pipe_cnt = dcn21_populate_dml_pipes_from_context(dc,
&context->res_ctx, pipes);
}
 
@@ -1585,11 +1588,29 @@ static uint32_t read_pipe_fuses(struct dc_context *ctx)
return value;
 }
 
+static int dcn21_populate_dml_pipes_from_context(
+   struct dc *dc, struct resource_context *res_ctx, 
display_e2e_pipe_params_st *pipes)
+{
+   uint32_t pipe_cnt = dcn20_populate_dml_pipes_from_context(dc, res_ctx, 
pipes);
+   int i;
+
+   for (i = 0; i < dc->res_pool->pipe_count; i++) {
+
+   if (!res_ctx->pipe_ctx[i].stream)
+   continue;
+
+   pipes[i].pipe.src.hostvm = 1;
+   pipes[i].pipe.src.gpuvm = 1;
+   }
+
+   return pipe_cnt;
+}
+
 static struct resource_funcs dcn21_res_pool_funcs = {
.destroy = dcn21_destroy_resource_pool,
.link_enc_create = dcn21_link_encoder_create,
.validate_bandwidth = dcn21_validate_bandwidth,
-   .populate_dml_pipes = dcn20_populate_dml_pipes_from_context,
+   .populate_dml_pipes = dcn21_populate_dml_pipes_from_context,
.add_stream_to_ctx = dcn20_add_stream_to_ctx,
.remove_stream_from_ctx = dcn20_remove_stream_from_ctx,
.acquire_idle_pipe_for_layer = dcn20_acquire_idle_pipe_for_layer,
-- 
2.23.0


[PATCH 20/37] drm/amd/display: fix number of dcn21 dpm clock levels

2019-10-17 Thread sunpeng.li
From: Dmytro Laktyushkin 

These are specific to dcn21 and should not be increased for
reuse on other asics.

Signed-off-by: Dmytro Laktyushkin 
Reviewed-by: Chris Park 
Acked-by: Leo Li 
---
 drivers/gpu/drm/amd/display/dc/dm_pp_smu.h | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h 
b/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h
index b01db61b6181..ef7df9ef6d7e 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h
@@ -251,8 +251,8 @@ struct pp_smu_funcs_nv {
 
 #define PP_SMU_NUM_SOCCLK_DPM_LEVELS  8
 #define PP_SMU_NUM_DCFCLK_DPM_LEVELS  8
-#define PP_SMU_NUM_FCLK_DPM_LEVELS8
-#define PP_SMU_NUM_MEMCLK_DPM_LEVELS  8
+#define PP_SMU_NUM_FCLK_DPM_LEVELS4
+#define PP_SMU_NUM_MEMCLK_DPM_LEVELS  4
 
 struct dpm_clock {
   uint32_t  Freq;// In MHz
-- 
2.23.0


[PATCH 17/37] drm/amd/display: Update min dcfclk

2019-10-17 Thread sunpeng.li
From: Alvin Lee 

[Why]
NV12 has a lower minimum dcfclk.

[How]
Add the update in update_bounding_box.

Signed-off-by: Alvin Lee 
Reviewed-by: Jun Lei 
Acked-by: Leo Li 
---
 .../gpu/drm/amd/display/dc/dcn20/dcn20_resource.c| 12 
 1 file changed, 8 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 
b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
index 2596d4ac6263..25515c255a3d 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
@@ -3084,10 +3084,14 @@ void dcn20_update_bounding_box(struct dc *dc, struct 
_vcs_dpi_soc_bounding_box_s
 
if (dc->bb_overrides.min_dcfclk_mhz > 0)
min_dcfclk = dc->bb_overrides.min_dcfclk_mhz;
-   else
-   // Accounting for SOC/DCF relationship, we can go as high as
-   // 506Mhz in Vmin.  We need to code 507 since SMU will round 
down to 506.
-   min_dcfclk = 507;
+   else {
+   if (ASICREV_IS_NAVI12_P(dc->ctx->asic_id.hw_internal_rev))
+   min_dcfclk = 310;
+   else
+   // Accounting for SOC/DCF relationship, we can go as 
high as
+   // 506Mhz in Vmin.
+   min_dcfclk = 506;
+   }
 
for (i = 0; i < num_states; i++) {
int min_fclk_required_by_uclk;
-- 
2.23.0


[PATCH 14/37] drm/amd/display: remove unnecessary assert

2019-10-17 Thread sunpeng.li
From: Dmytro Laktyushkin 

Signed-off-by: Dmytro Laktyushkin 
Reviewed-by: Chris Park 
Acked-by: Leo Li 
---
 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 
b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
index db3fb57bf244..9bc0ffad7093 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
@@ -1633,7 +1633,6 @@ bool dcn20_split_stream_for_odm(
next_odm_pipe->stream_res.dsc = NULL;
 #endif
if (prev_odm_pipe->next_odm_pipe && prev_odm_pipe->next_odm_pipe != 
next_odm_pipe) {
-   ASSERT(!next_odm_pipe->next_odm_pipe);
next_odm_pipe->next_odm_pipe = prev_odm_pipe->next_odm_pipe;
next_odm_pipe->next_odm_pipe->prev_odm_pipe = next_odm_pipe;
}
@@ -2398,8 +2397,8 @@ int dcn20_validate_apply_pipe_split_flags(
split[i] = true;

context->bw_ctx.dml.vba.ODMCombineEnablePerState[vlevel][pipe_idx] = true;
}
-   context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_idx]
-   = 
context->bw_ctx.dml.vba.ODMCombineEnablePerState[vlevel][pipe_idx];
+   context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_idx] =
+   
context->bw_ctx.dml.vba.ODMCombineEnablePerState[vlevel][pipe_idx];
/* Adjust dppclk when split is forced, do not bother with 
dispclk */
if (split[i] && 
context->bw_ctx.dml.vba.NoOfDPP[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx]
 == 1)

context->bw_ctx.dml.vba.RequiredDPPCLK[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx]
 /= 2;
-- 
2.23.0


[PATCH 23/37] drm/amd/display: map TRANSMITTER_UNIPHY_x to LINK_REGS_x

2019-10-17 Thread sunpeng.li
From: Yogesh Mohan Marimuthu 

[Why]
The enum value for TRANSMITTER_UNIPHY_G is 9, but in the dc_xx_resource
files the link_enc_regs[] entries for TRANSMITTER_UNIPHY_G are
initialized at index 6. Due to this mismatch, if a monitor is attached to
a port using TRANSMITTER_UNIPHY_G, the monitor blanks out.

[How]
Add a map_transmitter_id_to_phy_instance() function and use it
to map the transmitter enum to the link regs index.

Signed-off-by: Yogesh Mohan Marimuthu 
Reviewed-by: Eric Yang 
Acked-by: Leo Li 
---
 .../amd/display/dc/dce100/dce100_resource.c   | 37 ++-
 .../amd/display/dc/dce110/dce110_resource.c   | 37 ++-
 .../amd/display/dc/dce112/dce112_resource.c   | 37 ++-
 .../amd/display/dc/dce120/dce120_resource.c   | 37 ++-
 .../drm/amd/display/dc/dce80/dce80_resource.c | 37 ++-
 .../drm/amd/display/dc/dcn10/dcn10_resource.c | 28 +-
 .../drm/amd/display/dc/dcn20/dcn20_resource.c | 33 -
 .../drm/amd/display/dc/dcn21/dcn21_resource.c | 31 +++-
 8 files changed, 269 insertions(+), 8 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c 
b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
index b5d6dff29c45..a5e122c721ec 100644
--- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
@@ -399,6 +399,37 @@ static const struct dc_plane_cap plane_cap = {
 #define CC_DC_HDMI_STRAPS__AUDIO_STREAM_NUMBER__SHIFT 0x8
 #endif
 
+static int map_transmitter_id_to_phy_instance(
+   enum transmitter transmitter)
+{
+   switch (transmitter) {
+   case TRANSMITTER_UNIPHY_A:
+   return 0;
+   break;
+   case TRANSMITTER_UNIPHY_B:
+   return 1;
+   break;
+   case TRANSMITTER_UNIPHY_C:
+   return 2;
+   break;
+   case TRANSMITTER_UNIPHY_D:
+   return 3;
+   break;
+   case TRANSMITTER_UNIPHY_E:
+   return 4;
+   break;
+   case TRANSMITTER_UNIPHY_F:
+   return 5;
+   break;
+   case TRANSMITTER_UNIPHY_G:
+   return 6;
+   break;
+   default:
+   ASSERT(0);
+   return 0;
+   }
+}
+
 static void read_dce_straps(
struct dc_context *ctx,
struct resource_straps *straps)
@@ -579,14 +610,18 @@ struct link_encoder *dce100_link_encoder_create(
 {
struct dce110_link_encoder *enc110 =
kzalloc(sizeof(struct dce110_link_encoder), GFP_KERNEL);
+   int link_regs_id;
 
if (!enc110)
return NULL;
 
+   link_regs_id =
+   map_transmitter_id_to_phy_instance(enc_init_data->transmitter);
+
dce110_link_encoder_construct(enc110,
  enc_init_data,
  &link_enc_feature,
- 
&link_enc_regs[enc_init_data->transmitter],
+ &link_enc_regs[link_regs_id],
  &link_enc_aux_regs[enc_init_data->channel 
- 1],
  
&link_enc_hpd_regs[enc_init_data->hpd_source]);
return &enc110->base;
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c 
b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
index c651a38e34a0..83a4dbf6d76e 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
@@ -448,6 +448,37 @@ static const struct dc_plane_cap underlay_plane_cap = {
 #define CC_DC_HDMI_STRAPS__AUDIO_STREAM_NUMBER__SHIFT 0x8
 #endif
 
+static int map_transmitter_id_to_phy_instance(
+   enum transmitter transmitter)
+{
+   switch (transmitter) {
+   case TRANSMITTER_UNIPHY_A:
+   return 0;
+   break;
+   case TRANSMITTER_UNIPHY_B:
+   return 1;
+   break;
+   case TRANSMITTER_UNIPHY_C:
+   return 2;
+   break;
+   case TRANSMITTER_UNIPHY_D:
+   return 3;
+   break;
+   case TRANSMITTER_UNIPHY_E:
+   return 4;
+   break;
+   case TRANSMITTER_UNIPHY_F:
+   return 5;
+   break;
+   case TRANSMITTER_UNIPHY_G:
+   return 6;
+   break;
+   default:
+   ASSERT(0);
+   return 0;
+   }
+}
+
 static void read_dce_straps(
struct dc_context *ctx,
struct resource_straps *straps)
@@ -625,14 +656,18 @@ static struct link_encoder *dce110_link_encoder_create(
 {
struct dce110_link_encoder *enc110 =
kzalloc(sizeof(struct dce110_link_encoder), GFP_KERNEL);
+   int link_regs_id;
 
if (!enc110)
return NULL;
 
+   link_regs_id =
+   map_transmitter_id_to_phy_instance(enc_init_data->transmitter);
+
dce110_link_encoder_construct(enc110,
 

[PATCH 28/37] drm/amd/display: move wm ranges reporting to end of init hw

2019-10-17 Thread sunpeng.li
From: Eric Yang 

[Why]
SMU does not keep the WM table across S3/S4, so the table needs to be
re-sent. Also defer sending the table until after DCN has initialized.

[How]
Send the table at the end of init hw.

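The dcn10_hw_sequencer.c hunk is not shown in this excerpt, so the following
is an assumption about how the hook is wired up: init_hw ends by asking the
clk mgr to (re)send the watermark ranges, which covers both boot and S3/S4
resume.

static void example_init_hw_tail(struct dc *dc)
{
        /* ... rest of init_hw ... */
        if (dc->clk_mgr->funcs->notify_wm_ranges)
                dc->clk_mgr->funcs->notify_wm_ranges(dc->clk_mgr);
}
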
Signed-off-by: Eric Yang 
Reviewed-by: Yongqiang Sun 
Acked-by: Leo Li 
---
 .../amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c | 149 +-
 .../amd/display/dc/dcn10/dcn10_hw_sequencer.c |   4 +
 .../gpu/drm/amd/display/dc/inc/hw/clk_mgr.h   |   1 +
 3 files changed, 81 insertions(+), 73 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c 
b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
index e8b8ee4f1b1e..f64d221ad6f1 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
@@ -396,12 +396,87 @@ void rn_init_clocks(struct clk_mgr *clk_mgr)
clk_mgr->clks.pwr_state = DCN_PWR_STATE_UNKNOWN;
 }
 
+void build_watermark_ranges(struct clk_bw_params *bw_params, struct 
pp_smu_wm_range_sets *ranges)
+{
+   int i, num_valid_sets;
+
+   num_valid_sets = 0;
+
+   for (i = 0; i < WM_SET_COUNT; i++) {
+   /* skip empty entries, the smu array has no holes*/
+   if (!bw_params->wm_table.entries[i].valid)
+   continue;
+
+   ranges->reader_wm_sets[num_valid_sets].wm_inst = 
bw_params->wm_table.entries[i].wm_inst;
+   ranges->reader_wm_sets[num_valid_sets].wm_type = 
bw_params->wm_table.entries[i].wm_type;;
+   /* We will not select WM based on dcfclk, so leave it as 
unconstrained */
+   ranges->reader_wm_sets[num_valid_sets].min_drain_clk_mhz = 
PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN;
+   ranges->reader_wm_sets[num_valid_sets].max_drain_clk_mhz = 
PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
+   /* fclk wil be used to select WM*/
+
+   if (ranges->reader_wm_sets[num_valid_sets].wm_type == 
WM_TYPE_PSTATE_CHG) {
+   if (i == 0)
+   
ranges->reader_wm_sets[num_valid_sets].min_fill_clk_mhz = 0;
+   else {
+   /* add 1 to make it non-overlapping with next 
lvl */
+   
ranges->reader_wm_sets[num_valid_sets].min_fill_clk_mhz = 
bw_params->clk_table.entries[i - 1].fclk_mhz + 1;
+   }
+   ranges->reader_wm_sets[num_valid_sets].max_fill_clk_mhz 
= bw_params->clk_table.entries[i].fclk_mhz;
+
+   } else {
+   /* unconstrained for memory retraining */
+   ranges->reader_wm_sets[num_valid_sets].min_fill_clk_mhz 
= PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN;
+   ranges->reader_wm_sets[num_valid_sets].max_fill_clk_mhz 
= PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
+
+   /* Modify previous watermark range to cover up to max */
+   ranges->reader_wm_sets[num_valid_sets - 
1].max_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
+   }
+   num_valid_sets++;
+   }
+
+   ASSERT(num_valid_sets != 0); /* Must have at least one set of valid 
watermarks */
+   ranges->num_reader_wm_sets = num_valid_sets;
+
+   /* modify the min and max to make sure we cover the whole range*/
+   ranges->reader_wm_sets[0].min_drain_clk_mhz = 
PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN;
+   ranges->reader_wm_sets[0].min_fill_clk_mhz = 
PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN;
+   ranges->reader_wm_sets[ranges->num_reader_wm_sets - 
1].max_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
+   ranges->reader_wm_sets[ranges->num_reader_wm_sets - 1].max_fill_clk_mhz 
= PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
+
+   /* This is for writeback only, does not matter currently as no 
writeback support*/
+   ranges->num_writer_wm_sets = 1;
+   ranges->writer_wm_sets[0].wm_inst = WM_A;
+   ranges->writer_wm_sets[0].min_fill_clk_mhz = 
PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN;
+   ranges->writer_wm_sets[0].max_fill_clk_mhz = 
PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
+   ranges->writer_wm_sets[0].min_drain_clk_mhz = 
PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN;
+   ranges->writer_wm_sets[0].max_drain_clk_mhz = 
PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
+
+}
+
+static void rn_notify_wm_ranges(struct clk_mgr *clk_mgr_base)
+{
+   struct dc_debug_options *debug = &clk_mgr_base->ctx->dc->debug;
+   struct pp_smu_wm_range_sets ranges = {0};
+   struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+   struct pp_smu_funcs *pp_smu = clk_mgr->pp_smu;
+
+   if (!debug->disable_pplib_wm_range) {
+   build_watermark_ranges(clk_mgr_base->bw_params, &ranges);
+
+   /* Notify PP Lib/SMU which Watermarks to use for which clock 
ranges */
+   if (pp_smu && pp_smu->rn_funcs.set_wm_ranges

[PATCH 30/37] drm/amd/display: Make clk mgr the only dto update point

2019-10-17 Thread sunpeng.li
From: Noah Abradjian 

[Why]

* Clk Mgr DTO update point did not cover all needed updates, as it included a
  check for plane_state which does not exist yet when the updater is called on
  driver startup
* This resulted in another update path in the pipe programming sequence, based
  on a dppclk update flag
* However, this alternate path allowed for stray DTO updates, some of which 
would
  occur in the wrong order during dppclk lowering and cause underflow

[How]

* Remove plane_state check and use of plane_res.dpp->inst, getting rid
  of sequence dependencies (this results in extra dto programming for unused
  pipes but that doesn't cause issues and is a small cost)
* Allow DTOs to be updated even if global clock is equal, to account for
  edge case exposed by diags tests
* Remove update_dpp_dto call in pipe programming sequence (leave update to
  dppclk_control there, as that update is necessary and shouldn't occur in clk
  mgr)
* Remove call to optimize_bandwidth when committing state, as it is not needed
  and resulted in sporadic underflows even with other fixes in place

Signed-off-by: Noah Abradjian 
Reviewed-by: Jun Lei 
Acked-by: Leo Li 
---
 .../amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c   | 14 +-
 .../drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c  |  3 ++-
 drivers/gpu/drm/amd/display/dc/core/dc.c   |  4 
 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c |  8 +---
 4 files changed, 12 insertions(+), 17 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c 
b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
index 607d8afc56ec..25d7b7c6681c 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
@@ -108,11 +108,12 @@ void dcn20_update_clocks_update_dpp_dto(struct 
clk_mgr_internal *clk_mgr,
for (i = 0; i < clk_mgr->base.ctx->dc->res_pool->pipe_count; i++) {
int dpp_inst, dppclk_khz;
 
-   if (!context->res_ctx.pipe_ctx[i].plane_state)
-   continue;
-
-   dpp_inst = context->res_ctx.pipe_ctx[i].plane_res.dpp->inst;
+   /* Loop index will match dpp->inst if resource exists,
+* and we want to avoid dependency on dpp object
+*/
+   dpp_inst = i;
dppclk_khz = 
context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz;
+
clk_mgr->dccg->funcs->update_dpp_dto(
clk_mgr->dccg, dpp_inst, dppclk_khz);
}
@@ -235,6 +236,7 @@ void dcn2_update_clocks(struct clk_mgr *clk_mgr_base,
 
update_dispclk = true;
}
+
if (dc->config.forced_clocks == false || (force_reset && 
safe_to_lower)) {
if (dpp_clock_lowered) {
// if clock is being lowered, increase DTO before 
lowering refclk
@@ -244,10 +246,12 @@ void dcn2_update_clocks(struct clk_mgr *clk_mgr_base,
// if clock is being raised, increase refclk before 
lowering DTO
if (update_dppclk || update_dispclk)
dcn20_update_clocks_update_dentist(clk_mgr);
-   if (update_dppclk)
+   // always update dtos unless clock is lowered and not 
safe to lower
+   if (new_clocks->dppclk_khz >= 
dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz)
dcn20_update_clocks_update_dpp_dto(clk_mgr, 
context);
}
}
+
if (update_dispclk &&
dmcu && dmcu->funcs->is_dmcu_initialized(dmcu)) {
/*update dmcu for wait_loop count*/
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c 
b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
index f64d221ad6f1..790a2d211bd6 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
@@ -171,7 +171,8 @@ void rn_update_clocks(struct clk_mgr *clk_mgr_base,
// if clock is being raised, increase refclk before lowering DTO
if (update_dppclk || update_dispclk)
rn_vbios_smu_set_dppclk(clk_mgr, 
clk_mgr_base->clks.dppclk_khz);
-   if (update_dppclk)
+   // always update dtos unless clock is lowered and not safe to 
lower
+   if (new_clocks->dppclk_khz >= 
dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz)
dcn20_update_clocks_update_dpp_dto(clk_mgr, context);
}
 
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c 
b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 5e487bb82861..0a443348df10 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -1241,10 +1241,6 @@ static enum dc_status dc_commit_state_no_check(struct dc 
*dc, struct dc_state *c
 
   

[PATCH 33/37] drm/amd/display: Add center mode for integer scaling in DC

2019-10-17 Thread sunpeng.li
From: Reza Amini 

[why]
We want to use the maximum space on the display to show the source.

[how]
For Centered Mode: replicate the source as many times as possible to use
the maximum of the display's active space, and add borders around it.

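A worked example of the centered-mode math added below, using hypothetical
numbers (1920x1080 addressable timing, 800x600 source, stream src == dst):

  integer_multiple = min(1920 / 800, 1080 / 600) = min(2, 1) = 1
  dst = 800 x 600
  dst.x = (1920 - 800) / 2 = 560, dst.y = (1080 - 600) / 2 = 240

Since dst is then an integer multiple of src, the taps are forced to 1 (no
scaling filter) and the remaining active area becomes borders.
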
Signed-off-by: Reza Amini 
Reviewed-by: Anthony Koo 
Acked-by: Leo Li 
---
 .../gpu/drm/amd/display/dc/core/dc_resource.c | 43 +++
 1 file changed, 35 insertions(+), 8 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c 
b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index 8fe39fdefc27..70e601a975df 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -951,7 +951,7 @@ static void calculate_inits_and_adj_vp(struct pipe_ctx 
*pipe_ctx)
data->inits.v_c_bot = dc_fixpt_add(data->inits.v_c, 
data->ratios.vert_c);
 
 }
-static bool are_rect_integer_multiples(struct rect src, struct rect dest)
+static bool are_rects_integer_multiples(struct rect src, struct rect dest)
 {
if (dest.width  >= src.width  && dest.width  % src.width  == 0 &&
dest.height >= src.height && dest.height % src.height == 0)
@@ -959,6 +959,38 @@ static bool are_rect_integer_multiples(struct rect src, 
struct rect dest)
 
return false;
 }
+
+void calculate_integer_scaling(struct pipe_ctx *pipe_ctx)
+{
+   if (!pipe_ctx->plane_state->scaling_quality.integer_scaling)
+   return;
+
+   //for Centered Mode
+   if (pipe_ctx->stream->dst.width  == pipe_ctx->stream->src.width &&
+   pipe_ctx->stream->dst.height == pipe_ctx->stream->src.height) {
+   // calculate maximum # of replication of src onto addressable
+   unsigned int integer_multiple = min(
+   pipe_ctx->stream->timing.h_addressable / 
pipe_ctx->stream->src.width,
+   pipe_ctx->stream->timing.v_addressable  / 
pipe_ctx->stream->src.height);
+
+   //scale dst
+   pipe_ctx->stream->dst.width  = integer_multiple * 
pipe_ctx->stream->src.width;
+   pipe_ctx->stream->dst.height = integer_multiple * 
pipe_ctx->stream->src.height;
+
+   //center dst onto addressable
+   pipe_ctx->stream->dst.x = 
(pipe_ctx->stream->timing.h_addressable - pipe_ctx->stream->dst.width)/2;
+   pipe_ctx->stream->dst.y = 
(pipe_ctx->stream->timing.v_addressable - pipe_ctx->stream->dst.height)/2;
+   }
+
+   //disable taps if src & dst are integer ratio
+   if (are_rects_integer_multiples(pipe_ctx->stream->src, 
pipe_ctx->stream->dst)) {
+   pipe_ctx->plane_state->scaling_quality.v_taps = 1;
+   pipe_ctx->plane_state->scaling_quality.h_taps = 1;
+   pipe_ctx->plane_state->scaling_quality.v_taps_c = 1;
+   pipe_ctx->plane_state->scaling_quality.h_taps_c = 1;
+   }
+}
+
 bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
 {
const struct dc_plane_state *plane_state = pipe_ctx->plane_state;
@@ -972,6 +1004,8 @@ bool resource_build_scaling_params(struct pipe_ctx 
*pipe_ctx)
pipe_ctx->plane_res.scl_data.format = 
convert_pixel_format_to_dalsurface(
pipe_ctx->plane_state->format);
 
+   calculate_integer_scaling(pipe_ctx);
+
calculate_scaling_ratios(pipe_ctx);
 
calculate_viewport(pipe_ctx);
@@ -1002,13 +1036,6 @@ bool resource_build_scaling_params(struct pipe_ctx 
*pipe_ctx)
res = 
pipe_ctx->plane_res.dpp->funcs->dpp_get_optimal_number_of_taps(
pipe_ctx->plane_res.dpp, 
&pipe_ctx->plane_res.scl_data, &plane_state->scaling_quality);
 
-   if (res &&
-   plane_state->scaling_quality.integer_scaling &&
-   are_rect_integer_multiples(pipe_ctx->plane_res.scl_data.viewport,
-  pipe_ctx->plane_res.scl_data.recout)) {
-   pipe_ctx->plane_res.scl_data.taps.v_taps = 1;
-   pipe_ctx->plane_res.scl_data.taps.h_taps = 1;
-   }
 
if (!res) {
/* Try 24 bpp linebuffer */
-- 
2.23.0


[PATCH 32/37] drm/amd/display: take signal type from link

2019-10-17 Thread sunpeng.li
From: Lewis Huang 

[Why]
The signal is updated to EDP when the driver disables the first encoder.
The following encoders then use SIGNAL_TYPE_EDP to handle other
devices. When an encoder's signal is actually HDMI, the driver will treat
it as DP and release the PHY, which causes a HW hang.

[How]
Take the signal type from link->connector_signal.

Signed-off-by: Lewis Huang 
Reviewed-by: Eric Yang 
Acked-by: Leo Li 
---
 .../drm/amd/display/dc/dce110/dce110_hw_sequencer.c   | 11 +++
 1 file changed, 3 insertions(+), 8 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c 
b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
index 9150e546dcf2..f0e837d14000 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
@@ -1421,8 +1421,6 @@ static enum dc_status apply_single_controller_ctx_to_hw(
 static void power_down_encoders(struct dc *dc)
 {
int i;
-   enum connector_id connector_id;
-   enum signal_type signal = SIGNAL_TYPE_NONE;
 
/* do not know BIOS back-front mapping, simply blank all. It will not
 * hurt for non-DP
@@ -1433,15 +1431,12 @@ static void power_down_encoders(struct dc *dc)
}
 
for (i = 0; i < dc->link_count; i++) {
-   connector_id = 
dal_graphics_object_id_get_connector_id(dc->links[i]->link_id);
-   if ((connector_id == CONNECTOR_ID_DISPLAY_PORT) ||
-   (connector_id == CONNECTOR_ID_EDP)) {
+   enum signal_type signal = dc->links[i]->connector_signal;
 
+   if ((signal == SIGNAL_TYPE_EDP) ||
+   (signal == SIGNAL_TYPE_DISPLAY_PORT))
if (!dc->links[i]->wa_flags.dp_keep_receiver_powered)
dp_receiver_power_ctrl(dc->links[i], false);
-   if (connector_id == CONNECTOR_ID_EDP)
-   signal = SIGNAL_TYPE_EDP;
-   }
 
dc->links[i]->link_enc->funcs->disable_output(
dc->links[i]->link_enc, signal);
-- 
2.23.0


[PATCH 12/37] drm/amd/display: correctly initialize dml odm variables

2019-10-17 Thread sunpeng.li
From: Dmytro Laktyushkin 

One of the ODM variables was not initialized in dml.

Signed-off-by: Dmytro Laktyushkin 
Reviewed-by: Chris Park 
Acked-by: Leo Li 
Acked-by: Tony Cheng 
---
 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c | 2 +-
 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h | 6 --
 drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c | 2 ++
 3 files changed, 3 insertions(+), 7 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 
b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
index ab1fc8c5ed10..4f9c3538fa8c 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
@@ -2524,7 +2524,7 @@ bool dcn20_fast_validate_bw(
return out;
 }
 
-void dcn20_calculate_wm(
+static void dcn20_calculate_wm(
struct dc *dc, struct dc_state *context,
display_e2e_pipe_params_st *pipes,
int *out_pipe_cnt,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h 
b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h
index fe68669a1f0c..dccfe07832e3 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h
@@ -150,12 +150,6 @@ void dcn20_calculate_dlg_params(
display_e2e_pipe_params_st *pipes,
int pipe_cnt,
int vlevel);
-void dcn20_calculate_wm(
-   struct dc *dc, struct dc_state *context,
-   display_e2e_pipe_params_st *pipes,
-   int *out_pipe_cnt,
-   int *pipe_split_from,
-   int vlevel);
 
 enum dc_status dcn20_build_mapped_resource(const struct dc *dc, struct 
dc_state *context, struct dc_stream_state *stream);
 enum dc_status dcn20_add_stream_to_ctx(struct dc *dc, struct dc_state 
*new_ctx, struct dc_stream_state *dc_stream);
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c 
b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c
index 362dc6ea98ae..038701d7383d 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c
@@ -432,6 +432,8 @@ static void fetch_pipe_params(struct display_mode_lib 
*mode_lib)
dst->recout_width; // TODO: or should this be 
full_recout_width???...maybe only when in hsplit mode?

mode_lib->vba.ODMCombineEnabled[mode_lib->vba.NumberOfActivePlanes] =
dst->odm_combine;
+   
mode_lib->vba.ODMCombineTypeEnabled[mode_lib->vba.NumberOfActivePlanes] =
+   dst->odm_combine;
mode_lib->vba.OutputFormat[mode_lib->vba.NumberOfActivePlanes] =
(enum output_format_class) 
(dout->output_format);
mode_lib->vba.OutputBpp[mode_lib->vba.NumberOfActivePlanes] =
-- 
2.23.0


[PATCH 34/37] drm/amd/display: Do not call update bounding box on dc create

2019-10-17 Thread sunpeng.li
From: Sung Lee 

[Why]
In Hybrid Graphics, dcn2_1_soc struct stays alive through PnP.
This causes an issue on dc init where dcn2_1_soc which has been
updated by update_bw_bounding_box gets put into dml->soc.
As update_bw_bounding_box is currently incorrect for dcn2.1,
this makes dml calculations fail due to incorrect parameters,
leading to a crash on PnP.

[How]
Comment out update_bw_bounding_box call for now.

Signed-off-by: Sung Lee 
Reviewed-by: Eric Yang 
Acked-by: Leo Li 
---
 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c | 8 +++-
 1 file changed, 7 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c 
b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
index f165f7e58da9..88f89d073061 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
@@ -1336,6 +1336,12 @@ struct display_stream_compressor *dcn21_dsc_create(
 
 static void update_bw_bounding_box(struct dc *dc, struct clk_bw_params 
*bw_params)
 {
+   /*
+   TODO: Fix this function to calculate correct values.
+   There are known issues with this function currently
+   that will need to be investigated. Use hardcoded known good values for 
now.
+
+
struct dcn21_resource_pool *pool = TO_DCN21_RES_POOL(dc->res_pool);
struct clk_limit_table *clk_table = &bw_params->clk_table;
int i;
@@ -1350,11 +1356,11 @@ static void update_bw_bounding_box(struct dc *dc, 
struct clk_bw_params *bw_param
dcn2_1_soc.clock_limits[i].dcfclk_mhz = 
clk_table->entries[i].dcfclk_mhz;
dcn2_1_soc.clock_limits[i].fabricclk_mhz = 
clk_table->entries[i].fclk_mhz;
dcn2_1_soc.clock_limits[i].socclk_mhz = 
clk_table->entries[i].socclk_mhz;
-   /* This is probably wrong, TODO: find correct calculation */
dcn2_1_soc.clock_limits[i].dram_speed_mts = 
clk_table->entries[i].memclk_mhz * 16 / 1000;
}
dcn2_1_soc.clock_limits[i] = dcn2_1_soc.clock_limits[i - i];
dcn2_1_soc.num_states = i;
+   */
 }
 
 /* Temporary Place holder until we can get them from fuse */
-- 
2.23.0


[PATCH 36/37] drm/amd/display: fix hubbub deadline programming

2019-10-17 Thread sunpeng.li
From: Eric Yang 

[Why]
Fix the programming of DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A; it was not
being filled in.

Signed-off-by: Eric Yang 
Reviewed-by: Dmytro Laktyushkin 
Acked-by: Leo Li 
---
 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c | 1 +
 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c | 1 +
 2 files changed, 2 insertions(+)

diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 
b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
index 3cdb61750570..5e3738e96fdc 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
@@ -2634,6 +2634,7 @@ static void dcn20_calculate_wm(
 #if defined(CONFIG_DRM_AMD_DC_DCN2_1)
context->bw_ctx.bw.dcn.watermarks.b.frac_urg_bw_nom = 
get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.b.frac_urg_bw_flip = 
get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, 
pipe_cnt) * 1000;
+   context->bw_ctx.bw.dcn.watermarks.b.urgent_latency_ns = 
get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
 #endif
 
if (vlevel < 2) {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c 
b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
index 88f89d073061..12a657692d6d 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
@@ -1009,6 +1009,7 @@ static void calculate_wm_set_for_vlevel(
 #if defined(CONFIG_DRM_AMD_DC_DCN2_1)
wm_set->frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(dml, pipes, 
pipe_cnt) * 1000;
wm_set->frac_urg_bw_flip = 
get_fraction_of_urgent_bandwidth_imm_flip(dml, pipes, pipe_cnt) * 1000;
+   wm_set->urgent_latency_ns = get_urgent_latency(dml, pipes, pipe_cnt) * 
1000;
 #endif
dml->soc.dram_clock_change_latency_us = 
dram_clock_change_latency_cached;
 
-- 
2.23.0


[PATCH 35/37] drm/amd/display: fix avoid_split for dcn2+ validation

2019-10-17 Thread sunpeng.li
From: Dmytro Laktyushkin 

We are currently processing the avoid-split logic incorrectly at the highest
voltage level.
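
In other words, the avoid-split pass should search upward from the current
voltage level for the first state where DML reports a single DPP for the pipe,
and only fall back when no such state exists (rough sketch of the corrected
loop; names taken from the hunk below):

    for (vlevel_split = vlevel; vlevel <= context->bw_ctx.dml.soc.num_states; vlevel++)
        if (context->bw_ctx.dml.vba.NoOfDPP[vlevel][0][pipe_idx] == 1)
            break;
    /* impossible to keep this pipe unsplit: restore the starting level */
    if (vlevel > context->bw_ctx.dml.soc.num_states)
        vlevel = vlevel_split;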

Signed-off-by: Dmytro Laktyushkin 
Reviewed-by: Eric Bernstein 
Acked-by: Leo Li 
---
 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c | 11 +++
 1 file changed, 7 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 
b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
index a1b2db8f687a..3cdb61750570 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
@@ -2357,10 +2357,11 @@ int dcn20_validate_apply_pipe_split_flags(
int vlevel,
bool *split)
 {
-   int i, pipe_idx, vlevel_unsplit;
+   int i, pipe_idx, vlevel_split;
bool force_split = false;
bool avoid_split = dc->debug.pipe_split_policy != MPC_SPLIT_DYNAMIC;
 
+   /* Single display loop, exits if there is more than one display */
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
bool exit_loop = false;
@@ -2391,22 +2392,24 @@ int dcn20_validate_apply_pipe_split_flags(
if (context->stream_count > dc->res_pool->pipe_count / 2)
avoid_split = true;
 
+   /* Avoid split loop looks for lowest voltage level that allows most 
unsplit pipes possible */
if (avoid_split) {
for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
if (!context->res_ctx.pipe_ctx[i].stream)
continue;
 
-   for (vlevel_unsplit = vlevel; vlevel <= 
context->bw_ctx.dml.soc.num_states; vlevel++)
+   for (vlevel_split = vlevel; vlevel <= 
context->bw_ctx.dml.soc.num_states; vlevel++)
if 
(context->bw_ctx.dml.vba.NoOfDPP[vlevel][0][pipe_idx] == 1)
break;
/* Impossible to not split this pipe */
-   if (vlevel == context->bw_ctx.dml.soc.num_states)
-   vlevel = vlevel_unsplit;
+   if (vlevel > context->bw_ctx.dml.soc.num_states)
+   vlevel = vlevel_split;
pipe_idx++;
}
context->bw_ctx.dml.vba.maxMpcComb = 0;
}
 
+   /* Split loop sets which pipe should be split based on dml outputs and 
dc flags */
for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
 
-- 
2.23.0


[PATCH 07/37] drm/amd/display: 3.2.55

2019-10-17 Thread sunpeng.li
From: Aric Cyr 

Signed-off-by: Aric Cyr 
Acked-by: Leo Li 
---
 drivers/gpu/drm/amd/display/dc/dc.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dc.h 
b/drivers/gpu/drm/amd/display/dc/dc.h
index 2e1d34882684..a86dad3808b6 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -39,7 +39,7 @@
 #include "inc/hw/dmcu.h"
 #include "dml/display_mode_lib.h"
 
-#define DC_VER "3.2.54"
+#define DC_VER "3.2.55"
 
 #define MAX_SURFACES 3
 #define MAX_PLANES 6
-- 
2.23.0


[PATCH 21/37] drm/amd/display: add embedded flag to dml

2019-10-17 Thread sunpeng.li
From: Dmytro Laktyushkin 

Signed-off-by: Dmytro Laktyushkin 
Reviewed-by: Eric Bernstein 
Acked-by: Leo Li 
---
 drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h | 1 +
 drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c | 1 +
 drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h | 1 +
 3 files changed, 3 insertions(+)

diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h 
b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
index 83f84cdd4055..cfacd6027467 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
@@ -318,6 +318,7 @@ struct _vcs_dpi_display_pipe_dest_params_st {
unsigned int vupdate_width;
unsigned int vready_offset;
unsigned char interlaced;
+   unsigned char embedded;
double pixel_rate_mhz;
unsigned char synchronized_vblank_all_planes;
unsigned char otg_inst;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c 
b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c
index 038701d7383d..7f9a5621922f 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c
@@ -375,6 +375,7 @@ static void fetch_pipe_params(struct display_mode_lib 
*mode_lib)
 
mode_lib->vba.pipe_plane[j] = 
mode_lib->vba.NumberOfActivePlanes;
 
+   mode_lib->vba.EmbeddedPanel[mode_lib->vba.NumberOfActivePlanes] 
= dst->embedded;
mode_lib->vba.DPPPerPlane[mode_lib->vba.NumberOfActivePlanes] = 
1;
mode_lib->vba.SourceScan[mode_lib->vba.NumberOfActivePlanes] =
(enum scan_direction_class) (src->source_scan);
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h 
b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h
index 91decac50557..1540ffbe3979 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h
@@ -387,6 +387,7 @@ struct vba_vars_st {
 
/* vba mode support */
/*inputs*/
+   bool EmbeddedPanel[DC__NUM_DPP__MAX];
bool SupportGFX7CompatibleTilingIn32bppAnd64bpp;
double MaxHSCLRatio;
double MaxVSCLRatio;
-- 
2.23.0


[PATCH 15/37] drm/amd/display: Fix MPO & pipe split on 3-pipe dcn2x

2019-10-17 Thread sunpeng.li
From: Michael Strauss 

[WHY]
DML is incorrectly initialized with 4 pipes on 3-pipe configs.
RequiredDPPCLK is halved on the unsplit pipe due to an incorrectly handled
3-pipe case, causing underflow with 2 planes & pipe split (MPO, 8K + 2nd display).

[HOW]
Set the correct number of DPPs/OTGs for dml init to generate the correct DPP
topology. Double RequiredDPPCLK after the clock is halved for pipe split and
find_secondary_pipe fails, to fix the underflow.
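
The dml init part amounts to discounting fused-off pipes before handing the IP
parameters to DML (sketch mirroring the dcn21 hunk below):

    uint32_t num_pipes = dcn2_1_ip.max_num_dpp;

    for (i = 0; i < dcn2_1_ip.max_num_dpp; i++)
        if (pipe_fuses & (1 << i))
            num_pipes--;                    /* pipe is fused off */

    dcn2_1_ip.max_num_dpp = num_pipes;
    dcn2_1_ip.max_num_otg = num_pipes;

    dml_init_instance(&dc->dml, &dcn2_1_soc, &dcn2_1_ip, DML_PROJECT_DCN21);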

Signed-off-by: Michael Strauss 
Reviewed-by: Dmytro Laktyushkin 
Acked-by: Leo Li 
---
 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c | 5 +++--
 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c | 8 
 2 files changed, 11 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 
b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
index 9bc0ffad7093..2596d4ac6263 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
@@ -2482,9 +2482,10 @@ bool dcn20_fast_validate_bw(
/* pipe not split previously needs split */
hsplit_pipe = dcn20_find_secondary_pipe(dc, 
&context->res_ctx, dc->res_pool, pipe);
ASSERT(hsplit_pipe);
-   if (!hsplit_pipe)
+   if (!hsplit_pipe) {
+   
context->bw_ctx.dml.vba.RequiredDPPCLK[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx]
 *= 2;
continue;
-
+   }
if 
(context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_idx]) {
if (!dcn20_split_stream_for_odm(
&context->res_ctx, 
dc->res_pool,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c 
b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
index d2e851e7a97f..5e3b48bb04f1 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
@@ -1722,6 +1722,14 @@ static bool construct(
 
pool->base.pp_smu = dcn21_pp_smu_create(ctx);
 
+   uint32_t num_pipes = dcn2_1_ip.max_num_dpp;
+
+   for (i = 0; i < dcn2_1_ip.max_num_dpp; i++)
+   if (pipe_fuses & 1 << i)
+   num_pipes--;
+   dcn2_1_ip.max_num_dpp = num_pipes;
+   dcn2_1_ip.max_num_otg = num_pipes;
+
dml_init_instance(&dc->dml, &dcn2_1_soc, &dcn2_1_ip, DML_PROJECT_DCN21);
 
init_data.ctx = dc->ctx;
-- 
2.23.0


[PATCH 26/37] drm/amd/display: Proper return of result when aux engine acquire fails

2019-10-17 Thread sunpeng.li
From: Anthony Koo 

[Why]
When aux engine acquire fails, we missed populating the operation_result
that describes the failure reason.

[How]
Set operation_result to new type:
AUX_CHANNEL_OPERATION_FAILED_ENGINE_ACQUIRE
in the case aux engine acquire has failed.
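
The acquire failure path then reports a distinct reason instead of returning
silently (condensed from the dce_aux_transfer_raw() hunk below):

    if (!acquire(aux_engine, ddc_pin)) {
        *operation_result = AUX_CHANNEL_OPERATION_FAILED_ENGINE_ACQUIRE;
        return -1;
    }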

Signed-off-by: Anthony Koo 
Reviewed-by: Aric Cyr 
Acked-by: Leo Li 
---
 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c | 1 +
 drivers/gpu/drm/amd/display/dc/dc_ddc_types.h   | 3 ++-
 drivers/gpu/drm/amd/display/dc/dce/dce_aux.c| 5 -
 3 files changed, 7 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index 3af2b429ff1b..779d0b60cac9 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -113,6 +113,7 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
result = -EIO;
break;
case AUX_CHANNEL_OPERATION_FAILED_INVALID_REPLY:
+   case AUX_CHANNEL_OPERATION_FAILED_ENGINE_ACQUIRE:
result = -EBUSY;
break;
case AUX_CHANNEL_OPERATION_FAILED_TIMEOUT:
diff --git a/drivers/gpu/drm/amd/display/dc/dc_ddc_types.h 
b/drivers/gpu/drm/amd/display/dc/dc_ddc_types.h
index 4ef97f65e55d..4f8f576d5fcf 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_ddc_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_ddc_types.h
@@ -49,7 +49,8 @@ enum aux_channel_operation_result {
AUX_CHANNEL_OPERATION_FAILED_REASON_UNKNOWN,
AUX_CHANNEL_OPERATION_FAILED_INVALID_REPLY,
AUX_CHANNEL_OPERATION_FAILED_TIMEOUT,
-   AUX_CHANNEL_OPERATION_FAILED_HPD_DISCON
+   AUX_CHANNEL_OPERATION_FAILED_HPD_DISCON,
+   AUX_CHANNEL_OPERATION_FAILED_ENGINE_ACQUIRE
 };
 
 
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c 
b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
index 976bd4987a28..22abb345ddc1 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
@@ -538,8 +538,10 @@ int dce_aux_transfer_raw(struct ddc_service *ddc,
memset(&aux_rep, 0, sizeof(aux_rep));
 
aux_engine = ddc->ctx->dc->res_pool->engines[ddc_pin->pin_data->en];
-   if (!acquire(aux_engine, ddc_pin))
+   if (!acquire(aux_engine, ddc_pin)) {
+   *operation_result = AUX_CHANNEL_OPERATION_FAILED_ENGINE_ACQUIRE;
return -1;
+   }
 
if (payload->i2c_over_aux)
aux_req.type = AUX_TRANSACTION_TYPE_I2C;
@@ -663,6 +665,7 @@ bool dce_aux_transfer_with_retries(struct ddc_service *ddc,
break;
 
case AUX_CHANNEL_OPERATION_FAILED_HPD_DISCON:
+   case AUX_CHANNEL_OPERATION_FAILED_ENGINE_ACQUIRE:
case AUX_CHANNEL_OPERATION_FAILED_REASON_UNKNOWN:
default:
goto fail;
-- 
2.23.0


[PATCH 09/37] drm/amd/display: Enable PSR

2019-10-17 Thread sunpeng.li
From: Roman Li 

[Why]
PSR (Panel Self-Refresh) is a power-saving feature for eDP panels.
The feature has support in DMCU (Display MicroController Unit).
DMCU/driver communication is implemented in DC.
DM can use existing DC PSR interface to use PSR feature.

[How]
- Read psr caps via dpcd
- Send vsc infoframe if panel supports psr
- Disable psr before h/w programming (FULL_UPDATE)
- Enable psr after h/w programming
- Disable psr for fb console
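
Taken together, the commit path gates PSR roughly as follows. This is a
simplified sketch of the amdgpu_dm_commit_planes() changes; the hunk below is
truncated in this archive, so the exact enable condition is an approximation:

    /* full updates reprogram hardware: make sure PSR is off first */
    if (acrtc_state->update_type > UPDATE_TYPE_FAST &&
        acrtc_state->stream->link->psr_allow_active)
        amdgpu_dm_psr_disable(acrtc_state->stream);

    dc_commit_updates_for_stream(...);

    /* after programming: set up PSR once, then re-enable it on fast updates */
    if (acrtc_state->update_type > UPDATE_TYPE_FAST &&
        acrtc_state->stream->psr_version &&
        !acrtc_state->stream->link->psr_feature_enabled)
        amdgpu_dm_link_setup_psr(acrtc_state->stream);
    else if (acrtc_state->update_type == UPDATE_TYPE_FAST &&
             acrtc_state->stream->link->psr_feature_enabled &&
             !acrtc_state->stream->link->psr_allow_active)
        amdgpu_dm_psr_enable(acrtc_state->stream);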

Signed-off-by: Roman Li 
Reviewed-by: Nicholas Kazlauskas 
Acked-by: Leo Li 
---
 .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 133 +-
 1 file changed, 130 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 10cce584719f..5e3bf4f86e52 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -147,6 +147,12 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
 static void handle_cursor_update(struct drm_plane *plane,
 struct drm_plane_state *old_plane_state);
 
+static void amdgpu_dm_set_psr_caps(struct dc_link *link);
+static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
+static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
+static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
+
+
 /*
  * dm_vblank_get_counter
  *
@@ -2418,6 +2424,7 @@ static int amdgpu_dm_initialize_drm_device(struct 
amdgpu_device *adev)
} else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
amdgpu_dm_update_connector_after_detect(aconnector);
register_backlight_device(dm, link);
+   amdgpu_dm_set_psr_caps(link);
}
 
 
@@ -3813,7 +3820,16 @@ create_stream_for_sink(struct amdgpu_dm_connector 
*aconnector,
 
if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket, 
false, false);
+   if (stream->link->psr_feature_enabled)  {
+   struct dc  *core_dc = stream->link->ctx->dc;
 
+   if (dc_is_dmcu_initialized(core_dc)) {
+   struct dmcu *dmcu = core_dc->res_pool->dmcu;
+
+   stream->psr_version = dmcu->dmcu_version.psr_version;
+   mod_build_vsc_infopacket(stream, 
&stream->vsc_infopacket);
+   }
+   }
 finish:
dc_sink_release(sink);
 
@@ -5908,6 +5924,7 @@ static void amdgpu_dm_commit_planes(struct 
drm_atomic_state *state,
uint32_t target_vblank, last_flip_vblank;
bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
bool pflip_present = false;
+   bool swizzle = true;
struct {
struct dc_surface_update surface_updates[MAX_SURFACES];
struct dc_plane_info plane_infos[MAX_SURFACES];
@@ -5953,6 +5970,9 @@ static void amdgpu_dm_commit_planes(struct 
drm_atomic_state *state,
 
dc_plane = dm_new_plane_state->dc_state;
 
+   if (dc_plane && !dc_plane->tiling_info.gfx9.swizzle)
+   swizzle = false;
+
bundle->surface_updates[planes_count].surface = dc_plane;
if (new_pcrtc_state->color_mgmt_changed) {
bundle->surface_updates[planes_count].gamma = 
dc_plane->gamma_correction;
@@ -6144,14 +6164,29 @@ static void amdgpu_dm_commit_planes(struct 
drm_atomic_state *state,
&acrtc_state->vrr_params.adjust);
spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
}
-
mutex_lock(&dm->dc_lock);
+   if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
+   acrtc_state->stream->link->psr_allow_active)
+   amdgpu_dm_psr_disable(acrtc_state->stream);
+
dc_commit_updates_for_stream(dm->dc,
 bundle->surface_updates,
 planes_count,
 acrtc_state->stream,
 &bundle->stream_update,
 dc_state);
+
+   if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
+   
acrtc_state->stream->psr_version &&
+   
!acrtc_state->stream->link->psr_feature_enabled)
+   amdgpu_dm_link_setup_psr(acrtc_state->stream);
+   else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
+   
acrtc_state->stream->link->psr_feature_enabled &&
+   
!acrtc_state->stream->link->psr_allow_active &&
+   

[PATCH 06/37] drm/amd/display: remove unused code

2019-10-17 Thread sunpeng.li
From: Dmytro Laktyushkin 

Commit hints are unnecessary after front end programming redesign.

Signed-off-by: Dmytro Laktyushkin 
Reviewed-by: Eric Bernstein 
Acked-by: Leo Li 
---
 drivers/gpu/drm/amd/display/dc/core/dc.c  | 2 --
 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c | 5 -
 drivers/gpu/drm/amd/display/dc/inc/core_types.h   | 4 
 3 files changed, 11 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c 
b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 41b51f43a64b..55b82ca44c3b 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -1248,8 +1248,6 @@ static enum dc_status dc_commit_state_no_check(struct dc 
*dc, struct dc_state *c
for (i = 0; i < context->stream_count; i++)
context->streams[i]->mode_changed = false;
 
-   memset(&context->commit_hints, 0, sizeof(context->commit_hints));
-
dc_release_state(dc->current_state);
 
dc->current_state = context;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 
b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
index c9792c47978a..893e44ca90f8 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
@@ -2275,7 +2275,6 @@ bool dcn20_fast_validate_bw(
int split_threshold = dc->res_pool->pipe_count / 2;
bool avoid_split = dc->debug.pipe_split_policy != MPC_SPLIT_DYNAMIC;
 
-
ASSERT(pipes);
if (!pipes)
return false;
@@ -2353,10 +2352,6 @@ bool dcn20_fast_validate_bw(
if (vlevel > context->bw_ctx.dml.soc.num_states)
goto validate_fail;
 
-   if ((context->stream_count > split_threshold && 
dc->current_state->stream_count <= split_threshold)
-   || (context->stream_count <= split_threshold && 
dc->current_state->stream_count > split_threshold))
-   context->commit_hints.full_update_needed = true;
-
/*initialize pipe_just_split_from to invalid idx*/
for (i = 0; i < MAX_PIPES; i++)
pipe_split_from[i] = -1;
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h 
b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
index eee78a73d88c..a831079607cd 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
@@ -398,10 +398,6 @@ struct dc_state {
 
struct clk_mgr *clk_mgr;
 
-   struct {
-   bool full_update_needed : 1;
-   } commit_hints;
-
struct kref refcount;
 };
 
-- 
2.23.0


[PATCH 31/37] drm/amd/display: 3.2.56

2019-10-17 Thread sunpeng.li
From: Aric Cyr 

Signed-off-by: Aric Cyr 
Acked-by: Leo Li 
---
 drivers/gpu/drm/amd/display/dc/dc.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dc.h 
b/drivers/gpu/drm/amd/display/dc/dc.h
index b578b2148e45..0416a17b0897 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -39,7 +39,7 @@
 #include "inc/hw/dmcu.h"
 #include "dml/display_mode_lib.h"
 
-#define DC_VER "3.2.55"
+#define DC_VER "3.2.56"
 
 #define MAX_SURFACES 3
 #define MAX_PLANES 6
-- 
2.23.0


[PATCH 37/37] drm/amd/display: Apply vactive dram clock change workaround to dcn2 DMLv2

2019-10-17 Thread sunpeng.li
From: Joshua Aberback 

[Why]
This workaround was put in dcn2 DMLv1, and now we need it in DMLv2.

Signed-off-by: Joshua Aberback 
Reviewed-by: Jun Lei 
Acked-by: Leo Li 
---
 .../gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c   | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c 
b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c
index 841ed6c23f93..3c70dd577292 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c
@@ -2611,7 +2611,8 @@ static void 
dml20v2_DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndP
mode_lib->vba.MinActiveDRAMClockChangeMargin
+ mode_lib->vba.DRAMClockChangeLatency;
 
-   if (mode_lib->vba.MinActiveDRAMClockChangeMargin > 0) {
+   if (mode_lib->vba.MinActiveDRAMClockChangeMargin > 50) {
+   mode_lib->vba.DRAMClockChangeWatermark += 25;
mode_lib->vba.DRAMClockChangeSupport[0][0] = 
dm_dram_clock_change_vactive;
} else {
if (mode_lib->vba.SynchronizedVBlank || 
mode_lib->vba.NumberOfActivePlanes == 1) {
-- 
2.23.0


[PATCH 29/37] drm/amd/display: Only use EETF when maxCL > max display

2019-10-17 Thread sunpeng.li
From: Krunoslav Kovac 

[Why&How]
BT.2390 EETF is used for tone mapping/range reduction.
Say the display is 0.1 - 500 nits.
The problematic case is when the content is 0 - 400 nits: we apply EETF only
because 0 < 0.1, i.e. just to reduce the range by 0.1 at the bottom.

With this commit, we ignore the bottom of the range. Most displays map 0 to
their minimum and then ramp up to 0.1, so sending 0.1 actually lands above 0.1.
Furthermore, HW that uses a 3D LUT also assumes min = 0.
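
With the numbers above, max_content = 400 <= max_display = 500, so EETF is no
longer applied just because min_content (0) is below min_display (0.1); only
the max comparison remains (sketch of the resulting check):

    /* min_content / min_display no longer influence the decision */
    use_eetf = (fs_params->max_content > fs_params->max_display);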

Signed-off-by: Krunoslav Kovac 
Reviewed-by: Aric Cyr 
Acked-by: Leo Li 
---
 drivers/gpu/drm/amd/display/modules/color/color_gamma.c | 6 +-
 1 file changed, 1 insertion(+), 5 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c 
b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
index 85dad356c9d5..1de4805cb8c7 100644
--- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
+++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
@@ -959,11 +959,7 @@ static bool build_freesync_hdr(struct pwl_float_data_ex 
*rgb_regamma,
if (fs_params->max_display < 100) // cap at 100 at the top
max_display = dc_fixpt_from_int(100);
 
-   if (fs_params->min_content < fs_params->min_display)
-   use_eetf = true;
-   else
-   min_content = min_display;
-
+   // only max used, we don't adjust min luminance
if (fs_params->max_content > fs_params->max_display)
use_eetf = true;
else
-- 
2.23.0


[PATCH 27/37] drm/amd/display: do not synchronize "drr" displays

2019-10-17 Thread sunpeng.li
From: Jun Lei 

[why]
A display that supports DRR can never really be considered
"synchronized" with any other display, because we can dynamically
enable DRR (i.e. without a modeset). This will cause their
relative CRTC positions to drift and lose sync, which will disrupt
features such as MCLK switching that assume and depend on
their permanent alignment (something that can only change with a modeset).

[how]
Check for ignore_msa in the stream when considering synchronizability.
This ignore_msa flag is effectively implemented as "supports DRR".
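
The synchronizability check then rejects any pair where either stream ignores
the MSA timing parameter, i.e. may be running DRR (sketch of the added check):

    if (stream1->ignore_msa_timing_param || stream2->ignore_msa_timing_param)
        return false;   /* a DRR-capable stream is never "synchronized" */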

Signed-off-by: Jun Lei 
Reviewed-by: Yongqiang Sun 
Acked-by: Anthony Koo 
Acked-by: Leo Li 
---
 drivers/gpu/drm/amd/display/dc/core/dc_resource.c | 6 ++
 1 file changed, 6 insertions(+)

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c 
b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index da9e2e5f5c0d..8fe39fdefc27 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -404,6 +404,9 @@ bool resource_are_streams_timing_synchronizable(
if (stream1->view_format != stream2->view_format)
return false;
 
+   if (stream1->ignore_msa_timing_param || 
stream2->ignore_msa_timing_param)
+   return false;
+
return true;
 }
 static bool is_dp_and_hdmi_sharable(
@@ -1540,6 +1543,9 @@ bool dc_is_stream_unchanged(
if (!are_stream_backends_same(old_stream, stream))
return false;
 
+   if (old_stream->ignore_msa_timing_param != 
stream->ignore_msa_timing_param)
+   return false;
+
return true;
 }
 
-- 
2.23.0


[PATCH 16/37] drm/amd/display: audio endpoint cannot switch

2019-10-17 Thread sunpeng.li
From: Paul Hsieh 

[Why]
On some systems, we need to check the dcn version at runtime,
not at compile time.

[How]
Plumb the dcn version parameter into find_first_free_audio().
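
Note that the hunk below only threads the new parameter through to the helper;
the body change that actually consults it is not part of this excerpt. A purely
hypothetical illustration of how the version could be used:

    static struct audio *find_first_free_audio(struct resource_context *res_ctx,
            const struct resource_pool *pool, enum engine_id id,
            enum dce_version dc_version)
    {
        int i;

        for (i = 0; i < pool->audio_count; i++) {
            if (res_ctx->is_audio_acquired[i])
                continue;
            /* hypothetical: on DCN, only take the endpoint matching the
             * stream encoder's engine id, since it cannot switch later */
            if (dc_version >= DCN_VERSION_1_0 && i != id)
                continue;
            return pool->audios[i];
        }
        return NULL;
    }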

Signed-off-by: Paul Hsieh 
Reviewed-by: Charlene Liu 
Acked-by: Leo Li 
---
 drivers/gpu/drm/amd/display/dc/core/dc_resource.c | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c 
b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index 25da0c45d828..da9e2e5f5c0d 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -1629,7 +1629,8 @@ static int acquire_first_free_pipe(
 static struct audio *find_first_free_audio(
struct resource_context *res_ctx,
const struct resource_pool *pool,
-   enum engine_id id)
+   enum engine_id id,
+   enum dce_version dc_version)
 {
int i, available_audio_count;
 
@@ -1965,7 +1966,7 @@ enum dc_status resource_map_pool_resources(
dc_is_audio_capable_signal(pipe_ctx->stream->signal) &&
stream->audio_info.mode_count && stream->audio_info.flags.all) {
pipe_ctx->stream_res.audio = find_first_free_audio(
-   &context->res_ctx, pool, pipe_ctx->stream_res.stream_enc->id);
+   &context->res_ctx, pool, pipe_ctx->stream_res.stream_enc->id, 
dc_ctx->dce_version);
 
/*
 * Audio assigned in order first come first get.
-- 
2.23.0


[PATCH 11/37] drm/amd/display: split dcn20 fast validate into more functions

2019-10-17 Thread sunpeng.li
From: Dmytro Laktyushkin 

Split a large function into smaller, reusable chunks.
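
After the split, the fast-validate path presumably reduces to a thin sequence
over the new helpers (hypothetical outline only; the refactored body of
dcn20_fast_validate_bw() is not part of this excerpt):

    dcn20_merge_pipes_for_validate(dc, context);

    pipe_cnt = dcn20_populate_dml_pipes_from_context(dc, &context->res_ctx, pipes);
    vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, pipe_cnt);

    dcn20_validate_apply_pipe_split_flags(dc, context, vlevel, split);
    /* ... ODM/MPC splitting, DSC validation and watermark calculation follow ... */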

Signed-off-by: Dmytro Laktyushkin 
Reviewed-by: Nevenko Stupar 
Acked-by: Leo Li 
---
 .../drm/amd/display/dc/dcn20/dcn20_resource.c | 182 ++
 .../drm/amd/display/dc/dcn20/dcn20_resource.h |  31 +++
 .../drm/amd/display/dc/dcn21/dcn21_resource.c |   1 +
 3 files changed, 136 insertions(+), 78 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 
b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
index 893e44ca90f8..ab1fc8c5ed10 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
@@ -1612,7 +1612,7 @@ static void swizzle_to_dml_params(
}
 }
 
-static bool dcn20_split_stream_for_odm(
+bool dcn20_split_stream_for_odm(
struct resource_context *res_ctx,
const struct resource_pool *pool,
struct pipe_ctx *prev_odm_pipe,
@@ -1690,7 +1690,7 @@ static bool dcn20_split_stream_for_odm(
return true;
 }
 
-static void dcn20_split_stream_for_mpc(
+void dcn20_split_stream_for_mpc(
struct resource_context *res_ctx,
const struct resource_pool *pool,
struct pipe_ctx *primary_pipe,
@@ -2148,7 +2148,7 @@ void dcn20_set_mcif_arb_params(
 }
 
 #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
-static bool dcn20_validate_dsc(struct dc *dc, struct dc_state *new_ctx)
+bool dcn20_validate_dsc(struct dc *dc, struct dc_state *new_ctx)
 {
int i;
 
@@ -2183,7 +2183,7 @@ static bool dcn20_validate_dsc(struct dc *dc, struct 
dc_state *new_ctx)
 }
 #endif
 
-static struct pipe_ctx *dcn20_find_secondary_pipe(struct dc *dc,
+struct pipe_ctx *dcn20_find_secondary_pipe(struct dc *dc,
struct resource_context *res_ctx,
const struct resource_pool *pool,
const struct pipe_ctx *primary_pipe)
@@ -2260,24 +2260,11 @@ static struct pipe_ctx 
*dcn20_find_secondary_pipe(struct dc *dc,
return secondary_pipe;
 }
 
-bool dcn20_fast_validate_bw(
+void dcn20_merge_pipes_for_validate(
struct dc *dc,
-   struct dc_state *context,
-   display_e2e_pipe_params_st *pipes,
-   int *pipe_cnt_out,
-   int *pipe_split_from,
-   int *vlevel_out)
+   struct dc_state *context)
 {
-   bool out = false;
-
-   int pipe_cnt, i, pipe_idx, vlevel, vlevel_unsplit;
-   bool force_split = false;
-   int split_threshold = dc->res_pool->pipe_count / 2;
-   bool avoid_split = dc->debug.pipe_split_policy != MPC_SPLIT_DYNAMIC;
-
-   ASSERT(pipes);
-   if (!pipes)
-   return false;
+   int i;
 
/* merge previously split odm pipes since mode support needs to make 
the decision */
for (i = 0; i < dc->res_pool->pipe_count; i++) {
@@ -2332,31 +2319,18 @@ bool dcn20_fast_validate_bw(
if (pipe->plane_state)
resource_build_scaling_params(pipe);
}
+}
 
-   if (dc->res_pool->funcs->populate_dml_pipes)
-   pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc,
-   &context->res_ctx, pipes);
-   else
-   pipe_cnt = dcn20_populate_dml_pipes_from_context(dc,
-   &context->res_ctx, pipes);
-
-   *pipe_cnt_out = pipe_cnt;
-
-   if (!pipe_cnt) {
-   out = true;
-   goto validate_out;
-   }
-
-   vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, pipe_cnt);
-
-   if (vlevel > context->bw_ctx.dml.soc.num_states)
-   goto validate_fail;
-
-   /*initialize pipe_just_split_from to invalid idx*/
-   for (i = 0; i < MAX_PIPES; i++)
-   pipe_split_from[i] = -1;
+int dcn20_validate_apply_pipe_split_flags(
+   struct dc *dc,
+   struct dc_state *context,
+   int vlevel,
+   bool *split)
+{
+   int i, pipe_idx, vlevel_unsplit;
+   bool force_split = false;
+   bool avoid_split = dc->debug.pipe_split_policy != MPC_SPLIT_DYNAMIC;
 
-   /* Single display only conditionals get set here */
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
bool exit_loop = false;
@@ -2383,38 +2357,105 @@ bool dcn20_fast_validate_bw(
if (exit_loop)
break;
}
-
-   if (context->stream_count > split_threshold)
+   /* TODO: fix dc bugs and remove this split threshold thing */
+   if (context->stream_count > dc->res_pool->pipe_count / 2)
avoid_split = true;
 
-   vlevel_unsplit = vlevel;
+   if (avoid_split) {
+   for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
+   if (!context->res_ctx.pipe_ctx[i].stream)
+   continue;
+

[PATCH] drm/amdgpu: Add DC feature mask to disable fractional pwm

2019-10-21 Thread sunpeng.li
From: Leo Li 

[Why]

Some LED panel drivers might not like fractional PWM. In such cases,
backlight flickering may be observed.

[How]

Add a DC feature mask to disable fractional PWM, and associate it with
the preexisting dc_config flag.

The flag is only plumbed through the dmcu firmware, so plumb it through
the driver path as well.

To disable, add the following to the linux cmdline:
amdgpu.dcfeaturemask=0x4
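
End to end, the mask is translated into a dc_config flag at DM init and then
consumed when the ABM initializes the backlight (condensed from the two hunks
below):

    /* amdgpu_dm_init() */
    if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
        init_data.flags.disable_fractional_pwm = true;

    /* dce_abm_init_backlight() */
    REG_UPDATE(BL_PWM_CNTL, BL_PWM_FRACTIONAL_EN,
               abm->ctx->dc->config.disable_fractional_pwm ? 0 : 1);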

Signed-off-by: Leo Li 
---
 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 3 +++
 drivers/gpu/drm/amd/display/dc/dce/dce_abm.c  | 4 
 drivers/gpu/drm/amd/include/amd_shared.h  | 1 +
 3 files changed, 8 insertions(+)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 1cf4beb76835..73f917d4d1e1 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -728,6 +728,9 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
init_data.flags.multi_mon_pp_mclk_switch = true;
 
+   if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
+   init_data.flags.disable_fractional_pwm = true;
+
init_data.flags.power_down_display_on_boot = true;
 
 #ifdef CONFIG_DRM_AMD_DC_DCN2_0
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c 
b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
index d759fdca7fdb..b8a3fc505c9b 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
@@ -404,6 +404,10 @@ static bool dce_abm_init_backlight(struct abm *abm)
/* Enable the backlight output */
REG_UPDATE(BL_PWM_CNTL, BL_PWM_EN, 1);
 
+   /* Disable fractional pwm if configured */
+   REG_UPDATE(BL_PWM_CNTL, BL_PWM_FRACTIONAL_EN,
+  abm->ctx->dc->config.disable_fractional_pwm ? 0 : 1);
+
/* Unlock group 2 backlight registers */
REG_UPDATE(BL_PWM_GRP1_REG_LOCK,
BL_PWM_GRP1_REG_LOCK, 0);
diff --git a/drivers/gpu/drm/amd/include/amd_shared.h 
b/drivers/gpu/drm/amd/include/amd_shared.h
index 8889aaceec60..5450ed762b7a 100644
--- a/drivers/gpu/drm/amd/include/amd_shared.h
+++ b/drivers/gpu/drm/amd/include/amd_shared.h
@@ -143,6 +143,7 @@ enum PP_FEATURE_MASK {
 enum DC_FEATURE_MASK {
DC_FBC_MASK = 0x1,
DC_MULTI_MON_PP_MCLK_SWITCH_MASK = 0x2,
+   DC_DISABLE_FRACTIONAL_PWM_MASK = 0x4,
 };
 
 enum amd_dpm_forced_level;
-- 
2.23.0


[PATCH v2] drm/amdgpu: Add DC feature mask to disable fractional pwm

2019-10-21 Thread sunpeng.li
From: Leo Li 

[Why]

Some LED panel drivers might not like fractional PWM. In such cases,
backlight flickering may be observed.

[How]

Add a DC feature mask to disable fractional PWM, and associate it with
the preexisting dc_config flag.

The flag is only plumbed through the dmcu firmware, so plumb it through
the driver path as well.

To disable, add the following to the linux cmdline:
amdgpu.dcfeaturemask=0x4

Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=204957
Signed-off-by: Leo Li 
---

v2: Add bugzilla link

 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 3 +++
 drivers/gpu/drm/amd/display/dc/dce/dce_abm.c  | 4 
 drivers/gpu/drm/amd/include/amd_shared.h  | 1 +
 3 files changed, 8 insertions(+)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 1cf4beb76835..73f917d4d1e1 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -728,6 +728,9 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
init_data.flags.multi_mon_pp_mclk_switch = true;
 
+   if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
+   init_data.flags.disable_fractional_pwm = true;
+
init_data.flags.power_down_display_on_boot = true;
 
 #ifdef CONFIG_DRM_AMD_DC_DCN2_0
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c 
b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
index d759fdca7fdb..b8a3fc505c9b 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
@@ -404,6 +404,10 @@ static bool dce_abm_init_backlight(struct abm *abm)
/* Enable the backlight output */
REG_UPDATE(BL_PWM_CNTL, BL_PWM_EN, 1);
 
+   /* Disable fractional pwm if configured */
+   REG_UPDATE(BL_PWM_CNTL, BL_PWM_FRACTIONAL_EN,
+  abm->ctx->dc->config.disable_fractional_pwm ? 0 : 1);
+
/* Unlock group 2 backlight registers */
REG_UPDATE(BL_PWM_GRP1_REG_LOCK,
BL_PWM_GRP1_REG_LOCK, 0);
diff --git a/drivers/gpu/drm/amd/include/amd_shared.h 
b/drivers/gpu/drm/amd/include/amd_shared.h
index 8889aaceec60..5450ed762b7a 100644
--- a/drivers/gpu/drm/amd/include/amd_shared.h
+++ b/drivers/gpu/drm/amd/include/amd_shared.h
@@ -143,6 +143,7 @@ enum PP_FEATURE_MASK {
 enum DC_FEATURE_MASK {
DC_FBC_MASK = 0x1,
DC_MULTI_MON_PP_MCLK_SWITCH_MASK = 0x2,
+   DC_DISABLE_FRACTIONAL_PWM_MASK = 0x4,
 };
 
 enum amd_dpm_forced_level;
-- 
2.23.0


[PATCH 1/2] drm/amd/display: Send vblank and user events at vstartup for DCN

2019-11-05 Thread sunpeng.li
From: Leo Li 

[Why]

For DCN hardware, the crtc_high_irq handler is assigned to the vstartup
interrupt. This is different from DCE, which has it assigned to vblank
start.

We'd like to send vblank and user events at vstartup because:

* It happens close enough to vupdate - the point of no return for HW.

* It is programmed as lines relative to vblank end - i.e. it is not in
  the variable portion when VRR is enabled. We should signal user
  events here.

* The pflip interrupt responsible for sending user events today only
  fires if the DCH HUBP component is not clock gated. In situations
  where planes are disabled - but the CRTC is enabled - user events won't
  be sent out, leading to flip done timeouts.

Consequently, this makes vupdate on DCN hardware redundant. It will be
removed in the next change.

[How]

Add a DCN-specific crtc_high_irq handler, and hook it to the VStartup
signal. Inside the DCN handler, we send off user events if the pflip
handler hasn't already done so.
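
The new handler therefore folds the flip-done bookkeeping into the vstartup
path (trimmed sketch of dm_dcn_crtc_high_irq() from the hunk below):

    drm_crtc_handle_vblank(&acrtc->base);

    spin_lock_irqsave(&adev->ddev->event_lock, flags);
    if (acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED) {
        if (acrtc->event) {
            drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
            acrtc->event = NULL;
            drm_crtc_vblank_put(&acrtc->base);
        }
        acrtc->pflip_status = AMDGPU_FLIP_NONE;
    }
    spin_unlock_irqrestore(&adev->ddev->event_lock, flags);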

Signed-off-by: Leo Li 
---
 .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 65 ++-
 1 file changed, 64 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 00017b91c91a..256a23a0ec28 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -485,6 +485,69 @@ static void dm_crtc_high_irq(void *interrupt_params)
}
 }

+
+/**
+ * dm_dcn_crtc_high_irq() - Handles VStartup interrupt for DCN generation ASICs
+ * @interrupt params - interrupt parameters
+ *
+ * Notify DRM's vblank event handler at VSTARTUP
+ *
+ * Unlike DCE hardware, we trigger the handler at VSTARTUP. at which:
+ * * We are close enough to VUPDATE - the point of no return for hw
+ * * We are in the fixed portion of variable front porch when vrr is enabled
+ * * We are before VUPDATE, where double-buffered vrr registers are swapped
+ *
+ * It is therefore the correct place to signal vblank, send user flip events,
+ * and update VRR.
+ */
+static void dm_dcn_crtc_high_irq(void *interrupt_params)
+{
+   struct common_irq_params *irq_params = interrupt_params;
+   struct amdgpu_device *adev = irq_params->adev;
+   struct amdgpu_crtc *acrtc;
+   struct dm_crtc_state *acrtc_state;
+   unsigned long flags;
+
+   acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - 
IRQ_TYPE_VBLANK);
+
+   if (!acrtc)
+   return;
+
+   acrtc_state = to_dm_crtc_state(acrtc->base.state);
+
+   DRM_DEBUG_DRIVER("crtc:%d, vupdate-vrr:%d\n", acrtc->crtc_id,
+   amdgpu_dm_vrr_active(acrtc_state));
+
+   amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
+   drm_crtc_handle_vblank(&acrtc->base);
+
+   spin_lock_irqsave(&adev->ddev->event_lock, flags);
+
+   if (acrtc_state->vrr_params.supported &&
+   acrtc_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE) {
+   mod_freesync_handle_v_update(
+   adev->dm.freesync_module,
+   acrtc_state->stream,
+   &acrtc_state->vrr_params);
+
+   dc_stream_adjust_vmin_vmax(
+   adev->dm.dc,
+   acrtc_state->stream,
+   &acrtc_state->vrr_params.adjust);
+   }
+
+   if (acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED) {
+   if (acrtc->event) {
+   drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
+   acrtc->event = NULL;
+   drm_crtc_vblank_put(&acrtc->base);
+   }
+   acrtc->pflip_status = AMDGPU_FLIP_NONE;
+   }
+
+   spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
+}
+
 static int dm_set_clockgating_state(void *handle,
  enum amd_clockgating_state state)
 {
@@ -2175,7 +2238,7 @@ static int dcn10_register_irq_handlers(struct 
amdgpu_device *adev)
c_irq_params->irq_src = int_params.irq_source;

amdgpu_dm_irq_register_interrupt(adev, &int_params,
-   dm_crtc_high_irq, c_irq_params);
+   dm_dcn_crtc_high_irq, c_irq_params);
}

/* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
--
2.23.0


[PATCH 2/2] drm/amd/display: Disable VUpdate interrupt for DCN hardware

2019-11-05 Thread sunpeng.li
From: Leo Li 

[Why]

On DCN hardware, the crtc_high_irq handler makes the vupdate_high_irq
handler redundant.

All the vupdate handler does is handle vblank events and update vrr
for DCE hw (excluding Vega, more on that later). As far as usermode is
concerned, vstartup happens close enough to vupdate on DCN that it can
be considered the "same". Handling vblank and updating vrr at vstartup
effectively replaces vupdate on DCN.

Vega is a bit special. Like DCN, the VRR registers on Vega are
double-buffered, and swapped at vupdate. But unlike DCN, it lacks a
vstartup interrupt. This means we can't quite remove the vupdate handler
for it, since delayed user events due to vrr are sent off there.

[How]

Remove registration of the VUpdate interrupt handler for DCN. Disable
the vupdate interrupt if the ASIC family is DCN, enable it otherwise.

Signed-off-by: Leo Li 
---
 .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 34 +++
 1 file changed, 4 insertions(+), 30 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 256a23a0ec28..568df046b2fe 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -2241,34 +2241,6 @@ static int dcn10_register_irq_handlers(struct 
amdgpu_device *adev)
dm_dcn_crtc_high_irq, c_irq_params);
}

-   /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
-* the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
-* to trigger at end of each vblank, regardless of state of the lock,
-* matching DCE behaviour.
-*/
-   for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
-i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + 
adev->mode_info.num_crtc - 1;
-i++) {
-   r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, 
&adev->vupdate_irq);
-
-   if (r) {
-   DRM_ERROR("Failed to add vupdate irq id!\n");
-   return r;
-   }
-
-   int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
-   int_params.irq_source =
-   dc_interrupt_to_irq_source(dc, i, 0);
-
-   c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - 
DC_IRQ_SOURCE_VUPDATE1];
-
-   c_irq_params->adev = adev;
-   c_irq_params->irq_src = int_params.irq_source;
-
-   amdgpu_dm_irq_register_interrupt(adev, &int_params,
-   dm_vupdate_high_irq, c_irq_params);
-   }
-
/* Use GRPH_PFLIP interrupt */
for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + 
adev->mode_info.num_crtc - 1;
@@ -4266,7 +4238,7 @@ static inline int dm_set_vblank(struct drm_crtc *crtc, 
bool enable)
struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
int rc = 0;

-   if (enable) {
+   if (enable && adev->family < AMDGPU_FAMILY_AI) {
/* vblank irq on -> Only need vupdate irq in vrr mode */
if (amdgpu_dm_vrr_active(acrtc_state))
rc = dm_set_vupdate_irq(crtc, true);
@@ -6243,6 +6215,7 @@ static void pre_update_freesync_state_on_stream(
 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
struct dm_crtc_state *new_state)
 {
+   struct amdgpu_device *adev = old_state->base.crtc->dev->dev_private;
bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
bool new_vrr_active = amdgpu_dm_vrr_active(new_state);

@@ -6255,7 +6228,8 @@ static void amdgpu_dm_handle_vrr_transition(struct 
dm_crtc_state *old_state,
 * We also need vupdate irq for the actual core vblank handling
 * at end of vblank.
 */
-   dm_set_vupdate_irq(new_state->base.crtc, true);
+   if (adev->family < AMDGPU_FAMILY_AI)
+   dm_set_vupdate_irq(new_state->base.crtc, true);
drm_crtc_vblank_get(new_state->base.crtc);
DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
 __func__, new_state->base.crtc->base.id);
--
2.23.0


[PATCH 2/2 v2] drm/amd/display: Disable VUpdate interrupt for DCN hardware

2019-11-05 Thread sunpeng.li
From: Leo Li 

[Why]

On DCN hardware, the crtc_high_irq handler makes the vupdate_high_irq
handler redundant.

All the vupdate handler does is handle vblank events and update vrr
for DCE hw (excluding Vega, more on that later). As far as usermode is
concerned, vstartup happens close enough to vupdate on DCN that it can
be considered the "same". Handling vblank and updating vrr at vstartup
effectively replaces vupdate on DCN.

Vega is a bit special. Like DCN, the VRR registers on Vega are
double-buffered, and swapped at vupdate. But unlike DCN, it lacks a
vstartup interrupt. This means we can't quite remove the vupdate handler
for it, since delayed user events due to vrr are sent off there.

[How]

Remove registration of the VUpdate interrupt handler for DCN. Disable
the vupdate interrupt if the ASIC family is DCN, enable it otherwise.

Signed-off-by: Leo Li 
---

v2: Don't exclude vega when enabling vupdate interrupts

 .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 34 +++
 1 file changed, 4 insertions(+), 30 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 256a23a0ec28..3664af3b41a1 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -2241,34 +2241,6 @@ static int dcn10_register_irq_handlers(struct 
amdgpu_device *adev)
dm_dcn_crtc_high_irq, c_irq_params);
}

-   /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
-* the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
-* to trigger at end of each vblank, regardless of state of the lock,
-* matching DCE behaviour.
-*/
-   for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
-i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + 
adev->mode_info.num_crtc - 1;
-i++) {
-   r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, 
&adev->vupdate_irq);
-
-   if (r) {
-   DRM_ERROR("Failed to add vupdate irq id!\n");
-   return r;
-   }
-
-   int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
-   int_params.irq_source =
-   dc_interrupt_to_irq_source(dc, i, 0);
-
-   c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - 
DC_IRQ_SOURCE_VUPDATE1];
-
-   c_irq_params->adev = adev;
-   c_irq_params->irq_src = int_params.irq_source;
-
-   amdgpu_dm_irq_register_interrupt(adev, &int_params,
-   dm_vupdate_high_irq, c_irq_params);
-   }
-
/* Use GRPH_PFLIP interrupt */
for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + 
adev->mode_info.num_crtc - 1;
@@ -4266,7 +4238,7 @@ static inline int dm_set_vblank(struct drm_crtc *crtc, 
bool enable)
struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
int rc = 0;

-   if (enable) {
+   if (enable && adev->family <= AMDGPU_FAMILY_AI) {
/* vblank irq on -> Only need vupdate irq in vrr mode */
if (amdgpu_dm_vrr_active(acrtc_state))
rc = dm_set_vupdate_irq(crtc, true);
@@ -6243,6 +6215,7 @@ static void pre_update_freesync_state_on_stream(
 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
struct dm_crtc_state *new_state)
 {
+   struct amdgpu_device *adev = old_state->base.crtc->dev->dev_private;
bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
bool new_vrr_active = amdgpu_dm_vrr_active(new_state);

@@ -6255,7 +6228,8 @@ static void amdgpu_dm_handle_vrr_transition(struct 
dm_crtc_state *old_state,
 * We also need vupdate irq for the actual core vblank handling
 * at end of vblank.
 */
-   dm_set_vupdate_irq(new_state->base.crtc, true);
+   if (adev->family <= AMDGPU_FAMILY_AI)
+   dm_set_vupdate_irq(new_state->base.crtc, true);
drm_crtc_vblank_get(new_state->base.crtc);
DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
 __func__, new_state->base.crtc->base.id);
--
2.23.0


[PATCH v3] drm/amd/display: Disable VUpdate interrupt for DCN hardware

2019-11-05 Thread sunpeng.li
From: Leo Li 

[Why]

On DCN hardware, the crtc_high_irq handler makes the vupdate_high_irq
handler redundant.

All the vupdate handler does is handle vblank events and update vrr
for DCE hw (excluding Vega, more on that later). As far as usermode is
concerned, vstartup happens close enough to vupdate on DCN that it can
be considered the "same". Handling vblank and updating vrr at vstartup
effectively replaces vupdate on DCN.

Vega is a bit special. Like DCN, the VRR registers on Vega are
double-buffered, and swapped at vupdate. But unlike DCN, it lacks a
vstartup interrupt. This means we can't quite remove the vupdate handler
for it, since delayed user events due to vrr are sent off there.

[How]

Remove registration of the vupdate interrupt handler for DCN. Disable
the vupdate interrupt if the ASIC family is DCN, enable it otherwise.

Signed-off-by: Leo Li 
---

v2: Don't exclude vega when enabling vupdate interrupts

v3: Move FAMILY_AI check inside dm_set_vupdate_irq()

 .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 32 +++
 1 file changed, 4 insertions(+), 28 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 256a23a0ec28..d40185dfd0c0 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -2241,34 +2241,6 @@ static int dcn10_register_irq_handlers(struct 
amdgpu_device *adev)
dm_dcn_crtc_high_irq, c_irq_params);
}
 
-   /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
-* the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
-* to trigger at end of each vblank, regardless of state of the lock,
-* matching DCE behaviour.
-*/
-   for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
-i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + 
adev->mode_info.num_crtc - 1;
-i++) {
-   r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, 
&adev->vupdate_irq);
-
-   if (r) {
-   DRM_ERROR("Failed to add vupdate irq id!\n");
-   return r;
-   }
-
-   int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
-   int_params.irq_source =
-   dc_interrupt_to_irq_source(dc, i, 0);
-
-   c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - 
DC_IRQ_SOURCE_VUPDATE1];
-
-   c_irq_params->adev = adev;
-   c_irq_params->irq_src = int_params.irq_source;
-
-   amdgpu_dm_irq_register_interrupt(adev, &int_params,
-   dm_vupdate_high_irq, c_irq_params);
-   }
-
/* Use GRPH_PFLIP interrupt */
for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + 
adev->mode_info.num_crtc - 1;
@@ -4249,6 +4221,10 @@ static inline int dm_set_vupdate_irq(struct drm_crtc 
*crtc, bool enable)
struct amdgpu_device *adev = crtc->dev->dev_private;
int rc;
 
+   /* Do not set vupdate for DCN hardware */
+   if (adev->family <= AMDGPU_FAMILY_AI)
+   return 0;
+
irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
 
rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
-- 
2.23.0


[PATCH v4] drm/amd/display: Disable VUpdate interrupt for DCN hardware

2019-11-05 Thread sunpeng.li
From: Leo Li 

[Why]

On DCN hardware, the crtc_high_irq handler makes the vupdate_high_irq
handler redundant.

All the vupdate handler does is handle vblank events and update vrr
for DCE hw (excluding Vega, more on that later). As far as usermode is
concerned, vstartup happens close enough to vupdate on DCN that it can
be considered the "same". Handling vblank and updating vrr at vstartup
effectively replaces vupdate on DCN.

Vega is a bit special. Like DCN, the VRR registers on Vega are
double-buffered, and swapped at vupdate. But unlike DCN, it lacks a
vstartup interrupt. This means we can't quite remove the vupdate handler
for it, since delayed user events due to vrr are sent off there.

[How]

Remove registration of the vupdate interrupt handler for DCN. Disable
the vupdate interrupt if the ASIC family is DCN, enable it otherwise.

Signed-off-by: Leo Li 
---

v2: Don't exclude vega when enabling vupdate interrupts

v3: Move FAMILY_AI check inside dm_set_vupdate_irq()

v4: Correct a brain fart

 .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 32 +++
 1 file changed, 4 insertions(+), 28 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 256a23a0ec28..d40185dfd0c0 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -2241,34 +2241,6 @@ static int dcn10_register_irq_handlers(struct 
amdgpu_device *adev)
dm_dcn_crtc_high_irq, c_irq_params);
}

-   /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
-* the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
-* to trigger at end of each vblank, regardless of state of the lock,
-* matching DCE behaviour.
-*/
-   for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
-i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + 
adev->mode_info.num_crtc - 1;
-i++) {
-   r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, 
&adev->vupdate_irq);
-
-   if (r) {
-   DRM_ERROR("Failed to add vupdate irq id!\n");
-   return r;
-   }
-
-   int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
-   int_params.irq_source =
-   dc_interrupt_to_irq_source(dc, i, 0);
-
-   c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - 
DC_IRQ_SOURCE_VUPDATE1];
-
-   c_irq_params->adev = adev;
-   c_irq_params->irq_src = int_params.irq_source;
-
-   amdgpu_dm_irq_register_interrupt(adev, &int_params,
-   dm_vupdate_high_irq, c_irq_params);
-   }
-
/* Use GRPH_PFLIP interrupt */
for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + 
adev->mode_info.num_crtc - 1;
@@ -4249,6 +4221,10 @@ static inline int dm_set_vupdate_irq(struct drm_crtc 
*crtc, bool enable)
struct amdgpu_device *adev = crtc->dev->dev_private;
int rc;

+   /* Do not set vupdate for DCN hardware */
+   if (adev->family > AMDGPU_FAMILY_AI)
+   return 0;
+
irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;

rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
--
2.23.0

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

[PATCH 00/16] DC Patches Sep 26, 2018

2018-09-26 Thread sunpeng.li
From: Leo Li 

Summary of changes:
* EDID emulation fix
* S3 resume fix on Vega10
* Add build types for internal tracking
* Fix screen corruption on Polaris
* Interlace video timing fix

Bhawanpreet Lakha (1):
  drm/amd/display: Fix Edid emulation for linux

Charlene Liu (2):
  drm/amd/display: fix 4K stereo screen flash issue
  drm/amd/display: fix Interlace video timing.

Eric Yang (2):
  drm/amd/display: block DP YCbCr420 modes
  drm/amd/display: clean up encoding checks

Eryk Brol (1):
  drm/amd/display: Add function to fetch clock requirements

Jun Lei (2):
  drm/amd/display: Add DC build_id to determine build type
  drm/amd/display: fix memory leak in resource pools

Leo Li (1):
  drm/amd/display: Flatten irq handler data struct

Murton Liu (1):
  drm/amd/display: HLK Periodic Frame Notification test failed

Nicholas Kazlauskas (1):
  drm/amd/display: Raise dispclk value for dce_update_clocks

Nikola Cornij (1):
  drm/amd/display: Add a check-function for virtual signal type

Roman Li (1):
  drm/amd/display: Fix Vega10 lightup on S3 resume

Su Sung Chung (1):
  drm/amd/display: Calculate swizzle mode using bpp during validation

Tony Cheng (1):
  drm/amd/display: dc 3.1.68

Yongqiang Sun (1):
  drm/amd/display: WA for DF keeps awake after S0i3.

 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c  | 139 -
 .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c  |  37 +++---
 drivers/gpu/drm/amd/display/dc/core/dc.c   |  46 +++
 drivers/gpu/drm/amd/display/dc/core/dc_link.c  |   4 +-
 drivers/gpu/drm/amd/display/dc/core/dc_resource.c  |  40 ++
 drivers/gpu/drm/amd/display/dc/dc.h|   8 +-
 drivers/gpu/drm/amd/display/dc/dc_hw_types.h   |   3 +-
 drivers/gpu/drm/amd/display/dc/dc_link.h   |   1 +
 drivers/gpu/drm/amd/display/dc/dc_types.h  |  12 ++
 drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c|   5 +
 .../gpu/drm/amd/display/dc/dce/dce_link_encoder.c  |  17 +--
 .../drm/amd/display/dc/dce100/dce100_resource.c|   5 +-
 .../amd/display/dc/dce110/dce110_hw_sequencer.c|   2 +-
 .../amd/display/dc/dce110/dce110_hw_sequencer.h|   5 -
 .../drm/amd/display/dc/dce110/dce110_resource.c|   5 +-
 .../drm/amd/display/dc/dce112/dce112_resource.c|  11 +-
 .../amd/display/dc/dce120/dce120_hw_sequencer.c|  12 --
 .../drm/amd/display/dc/dce120/dce120_resource.c|   3 +-
 .../gpu/drm/amd/display/dc/dce80/dce80_resource.c  |   5 +-
 .../gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c|  17 +++
 .../gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h|   4 +
 .../drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c  |  14 +++
 .../drm/amd/display/dc/dcn10/dcn10_link_encoder.c  |  18 +--
 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c  |  34 ++---
 .../gpu/drm/amd/display/dc/dcn10/dcn10_resource.c  |  26 +++-
 drivers/gpu/drm/amd/display/dc/inc/core_types.h|   3 +
 .../gpu/drm/amd/display/dc/inc/hw/link_encoder.h   |   1 -
 drivers/gpu/drm/amd/display/dc/inc/resource.h  |   3 +
 drivers/gpu/drm/amd/display/include/signal_types.h |   5 +
 29 files changed, 335 insertions(+), 150 deletions(-)

-- 
2.7.4

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH 01/16] drm/amd/display: Add DC build_id to determine build type

2018-09-26 Thread sunpeng.li
From: Jun Lei 

[why]
Sometimes there are indications that the incorrect driver is being
loaded in automated tests. This change adds the ability for builds to
be tagged with a string, which is then picked up by the test
infrastructure.

[how]
dc.c allocates a const string for the build id, initialized with a
default value indicating a production build. For test builds, the build
server will find and replace this value, and the test machine will then
verify it.
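
A stand-alone sketch of the tagging scheme; only the DC_BUILD_ID string
and the build_id field mirror the patch, the rest is a stub:

#include <stdio.h>
#include <string.h>

static const char DC_BUILD_ID[] = "production-build";

struct dc_sketch {
	const char *build_id;
};

int main(void)
{
	struct dc_sketch dc = { .build_id = DC_BUILD_ID };

	/* A test harness would compare against the tag it expects the
	 * build server to have patched in. */
	if (strcmp(dc.build_id, "production-build") == 0)
		printf("production build detected\n");
	else
		printf("tagged test build: %s\n", dc.build_id);
	return 0;
}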

Signed-off-by: Jun Lei 
Reviewed-by: Tony Cheng 
Acked-by: Leo Li 
---
 drivers/gpu/drm/amd/display/dc/core/dc.c | 3 +++
 drivers/gpu/drm/amd/display/dc/dc.h  | 2 ++
 2 files changed, 5 insertions(+)

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c 
b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 76fe5a9..99ecaeb 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -60,6 +60,7 @@
 #define DC_LOGGER \
dc->ctx->logger
 
+const static char DC_BUILD_ID[] = "production-build";
 
 
/***
  * Private functions
@@ -758,6 +759,8 @@ struct dc *dc_create(const struct dc_init_data *init_params)
 
dc->config = init_params->flags;
 
+   dc->build_id = DC_BUILD_ID;
+
DC_LOG_DC("Display Core initialized\n");
 
 
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h 
b/drivers/gpu/drm/amd/display/dc/dc.h
index 11ea2a2..5f65bea 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -315,6 +315,8 @@ struct dc {
struct compressor *fbc_compressor;
 
struct dc_debug_data debug_data;
+
+   const char *build_id;
 };
 
 enum frame_buffer_mode {
-- 
2.7.4

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH 02/16] drm/amd/display: fix 4K stereo screen flash issue

2018-09-26 Thread sunpeng.li
From: Charlene Liu 

[Why]
The HDMI scrambler is not enabled for pixel rates >340MHz.
[How]
Calculate the phy clock to include the HW frame packing factor.
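
A small worked example of the [How] above; the timing numbers are
illustrative and not taken from the patch:

#include <stdio.h>

int main(void)
{
	unsigned int pix_clk_khz = 297000;      /* e.g. a 4K@30 base timing */
	unsigned int phy_pix_clk = pix_clk_khz; /* default: same as stream timing */
	int hw_frame_packing = 1;               /* TIMING_3D_FORMAT_HW_FRAME_PACKING */

	/* HW frame packing doubles the effective pixel rate, pushing 4K
	 * stereo past the 340 MHz HDMI scrambling threshold. */
	if (hw_frame_packing)
		phy_pix_clk *= 2;               /* 594000 kHz > 340000 kHz */

	printf("phy clock: %u kHz -> scrambling %s\n", phy_pix_clk,
	       phy_pix_clk > 340000 ? "required" : "not required");
	return 0;
}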

Signed-off-by: Charlene Liu 
Reviewed-by: Chris Park 
Acked-by: Leo Li 
---
 drivers/gpu/drm/amd/display/dc/core/dc_resource.c | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c 
b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index 2d6a430..a3fee37 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -1975,6 +1975,9 @@ static void calculate_phy_pix_clks(struct dc_stream_state 
*stream)
else
stream->phy_pix_clk =
stream->timing.pix_clk_khz;
+
+   if (stream->timing.timing_3d_format == 
TIMING_3D_FORMAT_HW_FRAME_PACKING)
+   stream->phy_pix_clk *= 2;
 }
 
 enum dc_status resource_map_pool_resources(
-- 
2.7.4

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH 03/16] drm/amd/display: Add a check-function for virtual signal type

2018-09-26 Thread sunpeng.li
From: Nikola Cornij 

[why]
Same functions exist for all other signal types.

[how]
Add a function that checks against virtual signal type.

Signed-off-by: Nikola Cornij 
Reviewed-by: Leo Li 
---
 drivers/gpu/drm/amd/display/include/signal_types.h | 5 +
 1 file changed, 5 insertions(+)

diff --git a/drivers/gpu/drm/amd/display/include/signal_types.h 
b/drivers/gpu/drm/amd/display/include/signal_types.h
index 03476b1..f56d289 100644
--- a/drivers/gpu/drm/amd/display/include/signal_types.h
+++ b/drivers/gpu/drm/amd/display/include/signal_types.h
@@ -102,4 +102,9 @@ static inline bool dc_is_audio_capable_signal(enum 
signal_type signal)
dc_is_hdmi_signal(signal));
 }
 
+static inline bool dc_is_virtual_signal(enum signal_type signal)
+{
+   return (signal == SIGNAL_TYPE_VIRTUAL);
+}
+
 #endif
-- 
2.7.4

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH 04/16] drm/amd/display: Calculate swizzle mode using bpp during validation

2018-09-26 Thread sunpeng.li
From: Su Sung Chung 

[Why]
Previously, bandwidth validation was failing because the swizzle mode was
not initialized during plane_state allocation. The swizzle mode is
calculated using the pixel format, which is how swizzle mode is initially
calculated in addrlib.

[How]
* Set the default swizzle mode for validation to DC_SW_UNKNOWN
* Created a new function, dcn10_assign_swizzle_mode, which sets the
  plane swizzle mode based on the selected pixel format (see the sketch
  after this list)
* Added a call to assign_swizzle_mode in dc_validate_global_state
* Set the failsafe swizzle mode back to DC_SW_LINEAR
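
A minimal stand-alone sketch of the validation-time fallback; the enum
values and the bpp-to-swizzle policy below are invented for illustration
and do not reflect the real addrlib decision:

#include <stdio.h>

enum sw_mode { DC_SW_UNKNOWN, DC_SW_LINEAR, DC_SW_64KB_S };
enum pix_fmt { FMT_ARGB8888, FMT_ARGB16161616F };

struct plane_sketch {
	enum pix_fmt format;
	enum sw_mode swizzle;
};

static int bpp_of(enum pix_fmt f)
{
	return f == FMT_ARGB16161616F ? 64 : 32;
}

/* Placeholder policy: tiled for wide formats, linear otherwise. The
 * real choice comes from the pool's get_default_swizzle_mode(). */
static void assign_default_swizzle(struct plane_sketch *p)
{
	if (p->swizzle != DC_SW_UNKNOWN)
		return;
	p->swizzle = bpp_of(p->format) >= 64 ? DC_SW_64KB_S : DC_SW_LINEAR;
}

int main(void)
{
	struct plane_sketch p = { .format = FMT_ARGB8888, .swizzle = DC_SW_UNKNOWN };

	/* dc_validate_global_state() would trigger this per pipe. */
	assign_default_swizzle(&p);
	printf("swizzle resolved to %d\n", (int)p.swizzle);
	return 0;
}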

Signed-off-by: Su Sung Chung 
Reviewed-by: Eric Yang 
Acked-by: Leo Li 
---
 drivers/gpu/drm/amd/display/dc/core/dc.c   | 30 ++
 drivers/gpu/drm/amd/display/dc/core/dc_resource.c  | 37 ++
 drivers/gpu/drm/amd/display/dc/dc_hw_types.h   |  3 +-
 .../gpu/drm/amd/display/dc/dcn10/dcn10_resource.c  | 21 +++-
 drivers/gpu/drm/amd/display/dc/inc/core_types.h|  3 ++
 drivers/gpu/drm/amd/display/dc/inc/resource.h  |  3 ++
 6 files changed, 67 insertions(+), 30 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c 
b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 99ecaeb..a0e933f 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -1113,32 +1113,6 @@ static bool is_surface_in_context(
return false;
 }
 
-static unsigned int pixel_format_to_bpp(enum surface_pixel_format format)
-{
-   switch (format) {
-   case SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr:
-   case SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb:
-   return 12;
-   case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555:
-   case SURFACE_PIXEL_FORMAT_GRPH_RGB565:
-   case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr:
-   case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb:
-   return 16;
-   case SURFACE_PIXEL_FORMAT_GRPH_ARGB:
-   case SURFACE_PIXEL_FORMAT_GRPH_ABGR:
-   case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010:
-   case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010:
-   return 32;
-   case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
-   case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F:
-   case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
-   return 64;
-   default:
-   ASSERT_CRITICAL(false);
-   return -1;
-   }
-}
-
 static enum surface_update_type get_plane_info_update_type(const struct 
dc_surface_update *u)
 {
union surface_update_flags *update_flags = &u->surface->update_flags;
@@ -1172,8 +1146,8 @@ static enum surface_update_type 
get_plane_info_update_type(const struct dc_surfa
|| u->plane_info->dcc.grph.meta_pitch != 
u->surface->dcc.grph.meta_pitch)
update_flags->bits.dcc_change = 1;
 
-   if (pixel_format_to_bpp(u->plane_info->format) !=
-   pixel_format_to_bpp(u->surface->format))
+   if (resource_pixel_format_to_bpp(u->plane_info->format) !=
+   resource_pixel_format_to_bpp(u->surface->format))
/* different bytes per element will require full bandwidth
 * and DML calculation
 */
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c 
b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index a3fee37..b6fe29b 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -2099,6 +2099,14 @@ enum dc_status dc_validate_global_state(
if (pipe_ctx->stream != stream)
continue;
 
+   if (dc->res_pool->funcs->get_default_swizzle_mode &&
+   pipe_ctx->plane_state &&
+   
pipe_ctx->plane_state->tiling_info.gfx9.swizzle == DC_SW_UNKNOWN) {
+   result = 
dc->res_pool->funcs->get_default_swizzle_mode(pipe_ctx->plane_state);
+   if (result != DC_OK)
+   return result;
+   }
+
/* Switch to dp clock source only if there is
 * no non dp stream that shares the same timing
 * with the dp stream.
@@ -2888,3 +2896,32 @@ enum dc_status dc_validate_plane(struct dc *dc, const 
struct dc_plane_state *pla
 
return res;
 }
+
+unsigned int resource_pixel_format_to_bpp(enum surface_pixel_format format)
+{
+   switch (format) {
+   case SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS:
+   return 8;
+   case SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr:
+   case SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb:
+   return 12;
+   case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555:
+   case SURFACE_PIXEL_FORMAT_GRPH_RGB565:
+   case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr:
+   case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb:
+  

[PATCH 05/16] drm/amd/display: Add function to fetch clock requirements

2018-09-26 Thread sunpeng.li
From: Eryk Brol 

Also add the dram clock to the clocks struct, for systems that use it.
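
A usage sketch for the new helper, with pared-down stand-ins for the DC
structs; only the displayClock/memoryClock field names mirror the patch:

#include <stdio.h>

struct clocks_sketch {
	int dispclk_khz;
	int dramclk_khz; /* newly tracked by this patch */
};

struct asic_state_ex_sketch {
	unsigned int displayClock;
	unsigned int memoryClock;
};

/* Mirrors the idea of get_clock_requirements_for_state(): copy the
 * state's clock requirements into the report struct. */
static void get_clock_requirements_sketch(const struct clocks_sketch *clk,
					  struct asic_state_ex_sketch *info)
{
	info->displayClock = (unsigned int)clk->dispclk_khz;
	info->memoryClock  = (unsigned int)clk->dramclk_khz;
}

int main(void)
{
	struct clocks_sketch clk = { .dispclk_khz = 600000, .dramclk_khz = 800000 };
	struct asic_state_ex_sketch info;

	get_clock_requirements_sketch(&clk, &info);
	printf("dispclk %u kHz, dramclk %u kHz\n", info.displayClock, info.memoryClock);
	return 0;
}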

Signed-off-by: Eryk Brol 
Reviewed-by: Jun Lei 
Acked-by: Leo Li 
---
 drivers/gpu/drm/amd/display/dc/core/dc.c  | 13 +
 drivers/gpu/drm/amd/display/dc/dc.h   |  4 +++-
 drivers/gpu/drm/amd/display/dc/dc_types.h | 12 
 3 files changed, 28 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c 
b/drivers/gpu/drm/amd/display/dc/core/dc.c
index a0e933f..7c491c9 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -1836,3 +1836,16 @@ void dc_link_remove_remote_sink(struct dc_link *link, 
struct dc_sink *sink)
}
}
 }
+
+void get_clock_requirements_for_state(struct dc_state *state, struct 
AsicStateEx *info)
+{
+   info->displayClock  = (unsigned 
int)state->bw.dcn.clk.dispclk_khz;
+   info->engineClock   = (unsigned 
int)state->bw.dcn.clk.dcfclk_khz;
+   info->memoryClock   = (unsigned 
int)state->bw.dcn.clk.dramclk_khz;
+   info->maxSupportedDppClock  = (unsigned 
int)state->bw.dcn.clk.max_supported_dppclk_khz;
+   info->dppClock  = (unsigned 
int)state->bw.dcn.clk.dppclk_khz;
+   info->socClock  = (unsigned 
int)state->bw.dcn.clk.socclk_khz;
+   info->dcfClockDeepSleep = (unsigned 
int)state->bw.dcn.clk.dcfclk_deep_sleep_khz;
+   info->fClock= (unsigned 
int)state->bw.dcn.clk.fclk_khz;
+   info->phyClock  = (unsigned 
int)state->bw.dcn.clk.phyclk_khz;
+}
\ No newline at end of file
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h 
b/drivers/gpu/drm/amd/display/dc/dc.h
index 5f65bea..f328483 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -44,7 +44,6 @@
 #define MAX_STREAMS 6
 #define MAX_SINKS_PER_LINK 4
 
-
 
/***
  * Display Core Interfaces
  
**/
@@ -208,6 +207,7 @@ struct dc_clocks {
int dcfclk_deep_sleep_khz;
int fclk_khz;
int phyclk_khz;
+   int dramclk_khz;
 };
 
 struct dc_debug_options {
@@ -601,6 +601,8 @@ struct dc_validation_set {
 
 enum dc_status dc_validate_plane(struct dc *dc, const struct dc_plane_state 
*plane_state);
 
+void get_clock_requirements_for_state(struct dc_state *state, struct 
AsicStateEx *info);
+
 enum dc_status dc_validate_global_state(
struct dc *dc,
struct dc_state *new_ctx);
diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h 
b/drivers/gpu/drm/amd/display/dc/dc_types.h
index 4fb6278..6e12d64 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_types.h
@@ -659,4 +659,16 @@ enum i2c_mot_mode {
I2C_MOT_FALSE
 };
 
+struct AsicStateEx {
+   unsigned int memoryClock;
+   unsigned int displayClock;
+   unsigned int engineClock;
+   unsigned int maxSupportedDppClock;
+   unsigned int dppClock;
+   unsigned int socClock;
+   unsigned int dcfClockDeepSleep;
+   unsigned int fClock;
+   unsigned int phyClock;
+};
+
 #endif /* DC_TYPES_H_ */
-- 
2.7.4

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH 06/16] drm/amd/display: block DP YCbCr420 modes

2018-09-26 Thread sunpeng.li
From: Eric Yang 

[why]
DP YCbCr 4:2:0 modes are currently not supported and will black screen
when set.

[How]
Fail the validate timing helper for those modes.

Signed-off-by: Eric Yang 
Reviewed-by: Tony Cheng 
Acked-by: Leo Li 
---
 drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c | 3 +++
 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c | 3 +++
 2 files changed, 6 insertions(+)

diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c 
b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
index 4942590..70eb9472 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
@@ -662,6 +662,9 @@ bool dce110_link_encoder_validate_dp_output(
const struct dce110_link_encoder *enc110,
const struct dc_crtc_timing *crtc_timing)
 {
+   if (crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR420)
+   return false;
+
/* default RGB only */
if (crtc_timing->pixel_encoding == PIXEL_ENCODING_RGB)
return true;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c 
b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
index 6f67520..bef0011 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
@@ -606,6 +606,9 @@ bool dcn10_link_encoder_validate_dp_output(
const struct dcn10_link_encoder *enc10,
const struct dc_crtc_timing *crtc_timing)
 {
+   if (crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR420)
+   return false;
+
/* default RGB only */
if (crtc_timing->pixel_encoding == PIXEL_ENCODING_RGB)
return true;
-- 
2.7.4

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH 07/16] drm/amd/display: clean up encoding checks

2018-09-26 Thread sunpeng.li
From: Eric Yang 

[Why]
All ASICs we support have YCbCr support, so
the check is unnecessary. The current logic
in validate output also returns true all
the time, so the unnecessary logic is removed.

Signed-off-by: Eric Yang 
Reviewed-by: Tony Cheng 
Acked-by: Leo Li 
---
 drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c   | 16 +---
 drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c |  3 +--
 drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c |  3 +--
 drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c |  3 +--
 drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c |  1 -
 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c   |  3 +--
 .../gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c   | 17 +
 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c   |  3 +--
 drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h|  1 -
 9 files changed, 7 insertions(+), 43 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c 
b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
index 70eb9472..366bc8c 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
@@ -665,21 +665,7 @@ bool dce110_link_encoder_validate_dp_output(
if (crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR420)
return false;
 
-   /* default RGB only */
-   if (crtc_timing->pixel_encoding == PIXEL_ENCODING_RGB)
-   return true;
-
-   if (enc110->base.features.flags.bits.IS_YCBCR_CAPABLE)
-   return true;
-
-   /* for DCE 8.x or later DP Y-only feature,
-* we need ASIC cap + FeatureSupportDPYonly, not support 666 */
-   if (crtc_timing->flags.Y_ONLY &&
-   enc110->base.features.flags.bits.IS_YCBCR_CAPABLE &&
-   crtc_timing->display_color_depth != COLOR_DEPTH_666)
-   return true;
-
-   return false;
+   return true;
 }
 
 void dce110_link_encoder_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c 
b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
index b1cc388..5b75460 100644
--- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
@@ -551,8 +551,7 @@ static const struct encoder_feature_support 
link_enc_feature = {
.max_hdmi_deep_color = COLOR_DEPTH_121212,
.max_hdmi_pixel_clock = 30,
.flags.bits.IS_HBR2_CAPABLE = true,
-   .flags.bits.IS_TPS3_CAPABLE = true,
-   .flags.bits.IS_YCBCR_CAPABLE = true
+   .flags.bits.IS_TPS3_CAPABLE = true
 };
 
 struct link_encoder *dce100_link_encoder_create(
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c 
b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
index b44cc70..4607a6a 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
@@ -570,8 +570,7 @@ static const struct encoder_feature_support 
link_enc_feature = {
.max_hdmi_deep_color = COLOR_DEPTH_121212,
.max_hdmi_pixel_clock = 594000,
.flags.bits.IS_HBR2_CAPABLE = true,
-   .flags.bits.IS_TPS3_CAPABLE = true,
-   .flags.bits.IS_YCBCR_CAPABLE = true
+   .flags.bits.IS_TPS3_CAPABLE = true
 };
 
 static struct link_encoder *dce110_link_encoder_create(
diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c 
b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
index 0f8332e..8b5a269 100644
--- a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
@@ -555,8 +555,7 @@ static const struct encoder_feature_support 
link_enc_feature = {
.flags.bits.IS_HBR2_CAPABLE = true,
.flags.bits.IS_HBR3_CAPABLE = true,
.flags.bits.IS_TPS3_CAPABLE = true,
-   .flags.bits.IS_TPS4_CAPABLE = true,
-   .flags.bits.IS_YCBCR_CAPABLE = true
+   .flags.bits.IS_TPS4_CAPABLE = true
 };
 
 struct link_encoder *dce112_link_encoder_create(
diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c 
b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
index 5905580..53a7a2f 100644
--- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
@@ -609,7 +609,6 @@ static const struct encoder_feature_support 
link_enc_feature = {
.flags.bits.IS_HBR3_CAPABLE = true,
.flags.bits.IS_TPS3_CAPABLE = true,
.flags.bits.IS_TPS4_CAPABLE = true,
-   .flags.bits.IS_YCBCR_CAPABLE = true
 };
 
 static struct link_encoder *dce120_link_encoder_create(
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 
b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resourc

[PATCH 11/16] drm/amd/display: fix memory leak in resource pools

2018-09-26 Thread sunpeng.li
From: Jun Lei 

[why]
DDC engines were recently changed to be tracked independently
of the pipe count. The change was reflected in the resource constructor
but not in the destructor. This manifests as a memory leak when
pipe harvesting is enabled, since not all constructed DDC engines
are freed.

[how]
Make the destructor symmetric with the constructor for all dcX_resource
files.
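
A stand-alone sketch of the symmetry point: if the constructor allocates
one engine per DDC line (res_cap->num_ddc) but the destructor only walks
pipe_count entries, the extra engines leak when pipes are harvested.
Counts and types here are illustrative:

#include <stdio.h>
#include <stdlib.h>

#define NUM_DDC 6

struct pool_sketch {
	int pipe_count;          /* may shrink with pipe harvesting */
	void *engines[NUM_DDC];  /* sized by num_ddc, not pipe_count */
};

static void destruct(struct pool_sketch *pool)
{
	/* Loop bound must match what was constructed: num_ddc. */
	for (int i = 0; i < NUM_DDC; i++) {
		free(pool->engines[i]);
		pool->engines[i] = NULL;
	}
}

int main(void)
{
	struct pool_sketch pool = { .pipe_count = 4 };

	for (int i = 0; i < NUM_DDC; i++)
		pool.engines[i] = malloc(16);

	destruct(&pool); /* frees all six engines, not just pipe_count of them */
	printf("all ddc engines freed\n");
	return 0;
}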

Signed-off-by: Jun Lei 
Reviewed-by: Aric Cyr 
Acked-by: Leo Li 
---
 drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c | 2 ++
 drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c | 2 ++
 drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c | 8 +---
 drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c | 2 ++
 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c   | 2 ++
 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c   | 2 ++
 6 files changed, 15 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c 
b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
index 5b75460..14754a8 100644
--- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
@@ -689,7 +689,9 @@ static void destruct(struct dce110_resource_pool *pool)

kfree(DCE110TG_FROM_TG(pool->base.timing_generators[i]));
pool->base.timing_generators[i] = NULL;
}
+   }
 
+   for (i = 0; i < pool->base.res_cap->num_ddc; i++) {
if (pool->base.engines[i] != NULL)
dce110_engine_destroy(&pool->base.engines[i]);
if (pool->base.hw_i2cs[i] != NULL) {
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c 
b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
index 4607a6a..de19093 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
@@ -719,7 +719,9 @@ static void destruct(struct dce110_resource_pool *pool)

kfree(DCE110TG_FROM_TG(pool->base.timing_generators[i]));
pool->base.timing_generators[i] = NULL;
}
+   }
 
+   for (i = 0; i < pool->base.res_cap->num_ddc; i++) {
if (pool->base.engines[i] != NULL)
dce110_engine_destroy(&pool->base.engines[i]);
if (pool->base.hw_i2cs[i] != NULL) {
diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c 
b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
index 8b5a269..3ce79c2 100644
--- a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
@@ -693,9 +693,6 @@ static void destruct(struct dce110_resource_pool *pool)
if (pool->base.opps[i] != NULL)
dce110_opp_destroy(&pool->base.opps[i]);
 
-   if (pool->base.engines[i] != NULL)
-   dce110_engine_destroy(&pool->base.engines[i]);
-
if (pool->base.transforms[i] != NULL)
dce112_transform_destroy(&pool->base.transforms[i]);
 
@@ -711,6 +708,11 @@ static void destruct(struct dce110_resource_pool *pool)

kfree(DCE110TG_FROM_TG(pool->base.timing_generators[i]));
pool->base.timing_generators[i] = NULL;
}
+   }
+
+   for (i = 0; i < pool->base.res_cap->num_ddc; i++) {
+   if (pool->base.engines[i] != NULL)
+   dce110_engine_destroy(&pool->base.engines[i]);
if (pool->base.hw_i2cs[i] != NULL) {
kfree(pool->base.hw_i2cs[i]);
pool->base.hw_i2cs[i] = NULL;
diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c 
b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
index 53a7a2f..79ab5f9 100644
--- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
@@ -533,7 +533,9 @@ static void destruct(struct dce110_resource_pool *pool)

kfree(DCE110TG_FROM_TG(pool->base.timing_generators[i]));
pool->base.timing_generators[i] = NULL;
}
+   }
 
+   for (i = 0; i < pool->base.res_cap->num_ddc; i++) {
if (pool->base.engines[i] != NULL)
dce110_engine_destroy(&pool->base.engines[i]);
if (pool->base.hw_i2cs[i] != NULL) {
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c 
b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
index 79e5c5c..d68f951 100644
--- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
@@ -738,7 +738,9 @@ static void destruct(struct dce110_resource_pool *pool)

kfree(DCE110TG_FROM_TG(pool->base.timing_generators[i]));
pool->base.timing_generators[i] = 

[PATCH 08/16] drm/amd/display: WA for DF keeps awake after S0i3.

2018-09-26 Thread sunpeng.li
From: Yongqiang Sun 

[Why]
DF stays awake after S0i3 resume because DRAM_STATE_CNTL
is set by the BIOS command table during dcn init_hw.

[How]
As a workaround, check the STATE_CNTL status before init_hw:
if it is 0 before init_hw and is set to 1 after init_hw,
change it back to 0.
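
A stand-alone sketch of the workaround sequence, with the register
access faked by a plain variable:

#include <stdbool.h>
#include <stdio.h>

static bool self_refresh_force_enable; /* stands in for the DRAM_STATE_CNTL bit */

static void bios_golden_init_sketch(void)
{
	self_refresh_force_enable = true; /* side effect of the command table */
}

int main(void)
{
	/* Sample the bit before running the BIOS command table. */
	bool was_enabled = self_refresh_force_enable;

	bios_golden_init_sketch();

	/* Clear it again only if the command table flipped it on. */
	if (!was_enabled && self_refresh_force_enable) {
		self_refresh_force_enable = false; /* restore so DF can sleep */
		printf("workaround applied: force-enable cleared\n");
	}
	return 0;
}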

Signed-off-by: Yongqiang Sun 
Reviewed-by: Tony Cheng 
Acked-by: Leo Li 
---
 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c | 17 +
 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h |  4 
 .../gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c   | 14 ++
 3 files changed, 35 insertions(+)

diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c 
b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
index 1ea91e1..69345ce6 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
@@ -87,6 +87,23 @@ void hubbub1_wm_read_state(struct hubbub *hubbub,
s->dram_clk_chanage = 
REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D);
 }
 
+void hubbub1_disable_allow_self_refresh(struct hubbub *hubbub)
+{
+   REG_UPDATE(DCHUBBUB_ARB_DRAM_STATE_CNTL,
+   DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE, 0);
+}
+
+bool hububu1_is_allow_self_refresh_enabled(struct hubbub *hubbub)
+{
+   uint32_t enable = 0;
+
+   REG_GET(DCHUBBUB_ARB_DRAM_STATE_CNTL,
+   DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE, &enable);
+
+   return enable ? true : false;
+}
+
+
 bool hubbub1_verify_allow_pstate_change_high(
struct hubbub *hubbub)
 {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h 
b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h
index d6e596e..d0f03d1 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h
@@ -203,6 +203,10 @@ void hubbub1_program_watermarks(
unsigned int refclk_mhz,
bool safe_to_lower);
 
+void hubbub1_disable_allow_self_refresh(struct hubbub *hubbub);
+
+bool hububu1_is_allow_self_refresh_enabled(struct hubbub *hubub);
+
 void hubbub1_toggle_watermark_change_req(
struct hubbub *hubbub);
 
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c 
b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index a881ff5..193184a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -997,7 +997,21 @@ static void dcn10_init_hw(struct dc *dc)
} else {
 
if (!dcb->funcs->is_accelerated_mode(dcb)) {
+   bool allow_self_fresh_force_enable =
+   
hububu1_is_allow_self_refresh_enabled(dc->res_pool->hubbub);
+
bios_golden_init(dc);
+
+   /* WA for making DF sleep when idle after resume from 
S0i3.
+* DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE is set 
to 1 by
+* command table, if 
DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE = 0
+* before calling command table and it changed to 1 
after,
+* it should be set back to 0.
+*/
+   if (allow_self_fresh_force_enable == false &&
+   
hububu1_is_allow_self_refresh_enabled(dc->res_pool->hubbub))
+   
hubbub1_disable_allow_self_refresh(dc->res_pool->hubbub);
+
disable_vga(dc->hwseq);
}
 
-- 
2.7.4

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH 09/16] drm/amd/display: Fix Edid emulation for linux

2018-09-26 Thread sunpeng.li
From: Bhawanpreet Lakha 

[Why]
EDID emulation didn't work properly for Linux, as we stop programming
if nothing is connected physically.

[How]
We get a flag from DRM when we want to do EDID emulation. If this flag
is set and nothing is connected physically, we only program the front
end using VIRTUAL_SIGNAL.
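
A stand-alone sketch of the detection split described above, with the
real detection calls replaced by print statements and the DRM force flag
and sink presence reduced to booleans:

#include <stdbool.h>
#include <stdio.h>

static void emulated_link_detect_sketch(void) { printf("emulated link\n"); }
static void dc_link_detect_sketch(void)       { printf("real HPD detect\n"); }

static void handle_detect(bool forced, bool sink_present)
{
	if (forced && !sink_present)
		emulated_link_detect_sketch(); /* program front end only */
	else
		dc_link_detect_sketch();
}

int main(void)
{
	handle_detect(true, false);  /* forced EDID, nothing plugged in */
	handle_detect(false, true);  /* normal hotplug path */
	return 0;
}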

Signed-off-by: Bhawanpreet Lakha 
Reviewed-by: Harry Wentland 
Acked-by: Leo Li 
---
 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 139 +-
 drivers/gpu/drm/amd/display/dc/core/dc_link.c |   4 +-
 drivers/gpu/drm/amd/display/dc/dc_link.h  |   1 +
 3 files changed, 137 insertions(+), 7 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 8c82185..774db34 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -718,6 +718,87 @@ amdgpu_dm_find_first_crtc_matching_connector(struct 
drm_atomic_state *state,
return NULL;
 }
 
+static void emulated_link_detect(struct dc_link *link)
+{
+   struct dc_sink_init_data sink_init_data = { 0 };
+   struct display_sink_capability sink_caps = { 0 };
+   enum dc_edid_status edid_status;
+   struct dc_context *dc_ctx = link->ctx;
+   struct dc_sink *sink = NULL;
+   struct dc_sink *prev_sink = NULL;
+
+   link->type = dc_connection_none;
+   prev_sink = link->local_sink;
+
+   if (prev_sink != NULL)
+   dc_sink_retain(prev_sink);
+
+   switch (link->connector_signal) {
+   case SIGNAL_TYPE_HDMI_TYPE_A: {
+   sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
+   sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
+   break;
+   }
+
+   case SIGNAL_TYPE_DVI_SINGLE_LINK: {
+   sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
+   sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
+   break;
+   }
+
+   case SIGNAL_TYPE_DVI_DUAL_LINK: {
+   sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
+   sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
+   break;
+   }
+
+   case SIGNAL_TYPE_LVDS: {
+   sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
+   sink_caps.signal = SIGNAL_TYPE_LVDS;
+   break;
+   }
+
+   case SIGNAL_TYPE_EDP: {
+   sink_caps.transaction_type =
+   DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
+   sink_caps.signal = SIGNAL_TYPE_EDP;
+   break;
+   }
+
+   case SIGNAL_TYPE_DISPLAY_PORT: {
+   sink_caps.transaction_type =
+   DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
+   sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
+   break;
+   }
+
+   default:
+   DC_ERROR("Invalid connector type! signal:%d\n",
+   link->connector_signal);
+   return;
+   }
+
+   sink_init_data.link = link;
+   sink_init_data.sink_signal = sink_caps.signal;
+
+   sink = dc_sink_create(&sink_init_data);
+   if (!sink) {
+   DC_ERROR("Failed to create sink!\n");
+   return;
+   }
+
+   link->local_sink = sink;
+
+   edid_status = dm_helpers_read_local_edid(
+   link->ctx,
+   link,
+   sink);
+
+   if (edid_status != EDID_OK)
+   DC_ERROR("Failed to read EDID");
+
+}
+
 static int dm_resume(void *handle)
 {
struct amdgpu_device *adev = handle;
@@ -731,6 +812,7 @@ static int dm_resume(void *handle)
struct drm_plane *plane;
struct drm_plane_state *new_plane_state;
struct dm_plane_state *dm_new_plane_state;
+   enum dc_connection_type new_connection_type = dc_connection_none;
int ret;
int i;
 
@@ -761,7 +843,13 @@ static int dm_resume(void *handle)
continue;
 
mutex_lock(&aconnector->hpd_lock);
-   dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
+   if (!dc_link_detect_sink(aconnector->dc_link, 
&new_connection_type))
+   DRM_ERROR("KMS: Failed to detect connector\n");
+
+   if (aconnector->base.force && new_connection_type == 
dc_connection_none)
+   emulated_link_detect(aconnector->dc_link);
+   else
+   dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
 
if (aconnector->fake_enable && aconnector->dc_link->local_sink)
aconnector->fake_enable = false;
@@ -1006,6 +1094,7 @@ static void handle_hpd_irq(void *param)
struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector 
*)param;
struct drm_connector *connector = &aconnector->base;
struct drm_device *dev = connector->dev;
+   enum dc_connect

[PATCH 10/16] drm/amd/display: dc 3.1.68

2018-09-26 Thread sunpeng.li
From: Tony Cheng 

Signed-off-by: Tony Cheng 
Reviewed-by: Steven Chiu 
Acked-by: Leo Li 
---
 drivers/gpu/drm/amd/display/dc/dc.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dc.h 
b/drivers/gpu/drm/amd/display/dc/dc.h
index f328483..1995271 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -38,7 +38,7 @@
 #include "inc/compressor.h"
 #include "dml/display_mode_lib.h"
 
-#define DC_VER "3.1.67"
+#define DC_VER "3.1.68"
 
 #define MAX_SURFACES 3
 #define MAX_STREAMS 6
-- 
2.7.4

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH 13/16] drm/amd/display: fix Interlace video timing.

2018-09-26 Thread sunpeng.li
From: Charlene Liu 

[Description] Interlace mode shows wrong vertical timing.
Interlace timing in the EDID is half the vertical timing of the
equivalent progressive timing. The driver already doubles the vertical
timing in the EDID parser, so there is no need to double it again in
OPTC.
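
A small worked example of the double-doubling; the 1080i-style numbers
are illustrative:

#include <stdio.h>

int main(void)
{
	unsigned int v_total_from_edid_parser = 1125; /* already doubled by the parser */
	unsigned int interlace_factor = 2;

	/* Old OPTC code applied the interlace factor again. */
	unsigned int wrong = v_total_from_edid_parser * interlace_factor; /* 2250 */
	/* Fixed code just programs v_total - 1, as for progressive. */
	unsigned int right = v_total_from_edid_parser - 1;                /* 1124 */

	printf("old OTG_V_TOTAL: %u, fixed OTG_V_TOTAL: %u\n", wrong, right);
	return 0;
}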

Signed-off-by: Charlene Liu 
Reviewed-by: Chris Park 
Acked-by: Leo Li 
---
 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c | 32 +--
 1 file changed, 7 insertions(+), 25 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c 
b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
index 411f892..ad46294 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
@@ -98,7 +98,6 @@ static uint32_t get_start_vline(struct timing_generator 
*optc, const struct dc_c
struct dc_crtc_timing patched_crtc_timing;
int vesa_sync_start;
int asic_blank_end;
-   int interlace_factor;
int vertical_line_start;
 
patched_crtc_timing = *dc_crtc_timing;
@@ -112,16 +111,13 @@ static uint32_t get_start_vline(struct timing_generator 
*optc, const struct dc_c
vesa_sync_start -
patched_crtc_timing.h_border_left;
 
-   interlace_factor = patched_crtc_timing.flags.INTERLACE ? 2 : 1;
-
vesa_sync_start = patched_crtc_timing.v_addressable +
patched_crtc_timing.v_border_bottom +
patched_crtc_timing.v_front_porch;
 
asic_blank_end = (patched_crtc_timing.v_total -
vesa_sync_start -
-   patched_crtc_timing.v_border_top)
-   * interlace_factor;
+   patched_crtc_timing.v_border_top);
 
vertical_line_start = asic_blank_end - 
optc->dlg_otg_param.vstartup_start + 1;
if (vertical_line_start < 0) {
@@ -186,7 +182,6 @@ void optc1_program_timing(
uint32_t v_sync_end;
uint32_t v_init, v_fp2;
uint32_t h_sync_polarity, v_sync_polarity;
-   uint32_t interlace_factor;
uint32_t start_point = 0;
uint32_t field_num = 0;
uint32_t h_div_2;
@@ -237,16 +232,8 @@ void optc1_program_timing(
REG_UPDATE(OTG_H_SYNC_A_CNTL,
OTG_H_SYNC_A_POL, h_sync_polarity);
 
-   /* Load vertical timing */
+   v_total = patched_crtc_timing.v_total - 1;
 
-   /* CRTC_V_TOTAL = v_total - 1 */
-   if (patched_crtc_timing.flags.INTERLACE) {
-   interlace_factor = 2;
-   v_total = 2 * patched_crtc_timing.v_total;
-   } else {
-   interlace_factor = 1;
-   v_total = patched_crtc_timing.v_total - 1;
-   }
REG_SET(OTG_V_TOTAL, 0,
OTG_V_TOTAL, v_total);
 
@@ -259,7 +246,7 @@ void optc1_program_timing(
OTG_V_TOTAL_MIN, v_total);
 
/* v_sync_start = 0, v_sync_end = v_sync_width */
-   v_sync_end = patched_crtc_timing.v_sync_width * interlace_factor;
+   v_sync_end = patched_crtc_timing.v_sync_width;
 
REG_UPDATE_2(OTG_V_SYNC_A,
OTG_V_SYNC_A_START, 0,
@@ -271,15 +258,13 @@ void optc1_program_timing(
 
asic_blank_end = (patched_crtc_timing.v_total -
vesa_sync_start -
-   patched_crtc_timing.v_border_top)
-   * interlace_factor;
+   patched_crtc_timing.v_border_top);
 
/* v_blank_start = v_blank_end + v_active */
asic_blank_start = asic_blank_end +
(patched_crtc_timing.v_border_top +
patched_crtc_timing.v_addressable +
-   patched_crtc_timing.v_border_bottom)
-   * interlace_factor;
+   patched_crtc_timing.v_border_bottom);
 
REG_UPDATE_2(OTG_V_BLANK_START_END,
OTG_V_BLANK_START, asic_blank_start,
@@ -301,7 +286,7 @@ void optc1_program_timing(
0 : 1;
 
REG_UPDATE(OTG_V_SYNC_A_CNTL,
-   OTG_V_SYNC_A_POL, v_sync_polarity);
+   OTG_V_SYNC_A_POL, v_sync_polarity);
 
v_init = asic_blank_start;
if (optc->dlg_otg_param.signal == SIGNAL_TYPE_DISPLAY_PORT ||
@@ -532,7 +517,6 @@ bool optc1_validate_timing(
struct timing_generator *optc,
const struct dc_crtc_timing *timing)
 {
-   uint32_t interlace_factor;
uint32_t v_blank;
uint32_t h_blank;
uint32_t min_v_blank;
@@ -540,10 +524,8 @@ bool optc1_validate_timing(
 
ASSERT(timing != NULL);
 
-   interlace_factor = timing->flags.INTERLACE ? 2 : 1;
v_blank = (timing->v_total - timing->v_addressable -
-   timing->v_border_top - 
timing->v_border_bottom) *
-   interlace_factor;
+   timing->v_border_top - 
timing->v_border_bottom);
 
   

[PATCH 12/16] drm/amd/display: Flatten irq handler data struct

2018-09-26 Thread sunpeng.li
From: Leo Li 

[Why]
There is no reason why the common data needs to be kept separate.

[How]
Flatten the struct by moving common data into the DM IRQ struct.
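
A stand-alone sketch of the flattened layout; types are trimmed
stand-ins for the driver structs:

#include <stdio.h>

typedef void (*interrupt_handler)(void *);

/* Flattened layout, as in the patch: the former handler_common_data
 * fields live directly in the per-handler struct. */
struct irq_handler_data_sketch {
	interrupt_handler handler;
	void *handler_arg;
	int irq_source;
};

static void say_hello(void *arg)
{
	printf("irq handled for %s\n", (const char *)arg);
}

int main(void)
{
	struct irq_handler_data_sketch d = {
		.handler = say_hello,
		.handler_arg = "otg0",
		.irq_source = 1,
	};

	/* One level of indirection instead of d.hcd.handler(d.hcd.handler_arg). */
	d.handler(d.handler_arg);
	return 0;
}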

Signed-off-by: Leo Li 
Reviewed-by: David Francis 
Acked-by: Leo Li 
---
 .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c  | 37 --
 1 file changed, 14 insertions(+), 23 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
index a910f01..a212178 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
@@ -36,17 +36,13 @@
  * Private declarations.
  */
 
-struct handler_common_data {
+struct amdgpu_dm_irq_handler_data {
struct list_head list;
interrupt_handler handler;
void *handler_arg;
 
/* DM which this handler belongs to */
struct amdgpu_display_manager *dm;
-};
-
-struct amdgpu_dm_irq_handler_data {
-   struct handler_common_data hcd;
/* DAL irq source which registered for this interrupt. */
enum dc_irq_source irq_source;
 };
@@ -61,7 +57,7 @@ struct amdgpu_dm_irq_handler_data {
  * Private functions.
  */
 
-static void init_handler_common_data(struct handler_common_data *hcd,
+static void init_handler_common_data(struct amdgpu_dm_irq_handler_data *hcd,
 void (*ih)(void *),
 void *args,
 struct amdgpu_display_manager *dm)
@@ -85,11 +81,9 @@ static void dm_irq_work_func(struct work_struct *work)
struct amdgpu_dm_irq_handler_data *handler_data;
 
list_for_each(entry, handler_list) {
-   handler_data =
-   list_entry(
-   entry,
-   struct amdgpu_dm_irq_handler_data,
-   hcd.list);
+   handler_data = list_entry(entry,
+ struct amdgpu_dm_irq_handler_data,
+ list);
 
DRM_DEBUG_KMS("DM_IRQ: work_func: for dal_src=%d\n",
handler_data->irq_source);
@@ -97,7 +91,7 @@ static void dm_irq_work_func(struct work_struct *work)
DRM_DEBUG_KMS("DM_IRQ: schedule_work: for dal_src=%d\n",
handler_data->irq_source);
 
-   handler_data->hcd.handler(handler_data->hcd.handler_arg);
+   handler_data->handler(handler_data->handler_arg);
}
 
/* Call a DAL subcomponent which registered for interrupt notification
@@ -137,11 +131,11 @@ static struct list_head *remove_irq_handler(struct 
amdgpu_device *adev,
list_for_each_safe(entry, tmp, hnd_list) {
 
handler = list_entry(entry, struct amdgpu_dm_irq_handler_data,
-   hcd.list);
+list);
 
if (ih == handler) {
/* Found our handler. Remove it from the list. */
-   list_del(&handler->hcd.list);
+   list_del(&handler->list);
handler_removed = true;
break;
}
@@ -230,8 +224,7 @@ void *amdgpu_dm_irq_register_interrupt(struct amdgpu_device 
*adev,
 
memset(handler_data, 0, sizeof(*handler_data));
 
-   init_handler_common_data(&handler_data->hcd, ih, handler_args,
-   &adev->dm);
+   init_handler_common_data(handler_data, ih, handler_args, &adev->dm);
 
irq_source = int_params->irq_source;
 
@@ -250,7 +243,7 @@ void *amdgpu_dm_irq_register_interrupt(struct amdgpu_device 
*adev,
break;
}
 
-   list_add_tail(&handler_data->hcd.list, hnd_list);
+   list_add_tail(&handler_data->list, hnd_list);
 
DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
 
@@ -462,15 +455,13 @@ static void amdgpu_dm_irq_immediate_work(struct 
amdgpu_device *adev,
entry,
&adev->dm.irq_handler_list_high_tab[irq_source]) {
 
-   handler_data =
-   list_entry(
-   entry,
-   struct amdgpu_dm_irq_handler_data,
-   hcd.list);
+   handler_data = list_entry(entry,
+ struct amdgpu_dm_irq_handler_data,
+ list);
 
/* Call a subcomponent which registered for immediate
 * interrupt notification */
-   handler_data->hcd.handler(handler_data->hcd.handler_arg);
+   handler_data->handler(handler_data->handler_arg);
}
 
DM_IRQ_TABLE_UNLOCK

[PATCH 14/16] drm/amd/display: HLK Periodic Frame Notification test failed

2018-09-26 Thread sunpeng.li
From: Murton Liu 

[Why]
Due to a small pre-fetch window, the active vline timing is a couple
of lines off when compared to what it should be.

[How]
Changed the calculation for the start vline to account for this window.
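
A quick arithmetic sketch of the wrap-around branch touched here, with
illustrative numbers (the real code also handles the non-wrapping case
and some bookkeeping around req_delta_lines):

#include <stdio.h>

int main(void)
{
	unsigned int v_total = 1125, vsync_line = 4, req_delta_lines = 10;
	unsigned int old_start, new_start;

	if (req_delta_lines > vsync_line) {
		/* Requested delta reaches past vsync: wrap into the previous frame. */
		old_start = v_total - (req_delta_lines - vsync_line) - 1; /* 1118 */
		new_start = v_total - (req_delta_lines - vsync_line) + 2; /* 1121 */
	} else {
		old_start = new_start = vsync_line - req_delta_lines;
	}

	printf("start vline: old %u, new %u\n", old_start, new_start);
	return 0;
}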

Signed-off-by: Murton Liu 
Reviewed-by: Aric Cyr 
Acked-by: Leo Li 
---
 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c 
b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
index ad46294..5462668 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
@@ -150,7 +150,7 @@ void optc1_program_vline_interrupt(
req_delta_lines--;
 
if (req_delta_lines > vsync_line)
-   start_line = dc_crtc_timing->v_total - (req_delta_lines - 
vsync_line) - 1;
+   start_line = dc_crtc_timing->v_total - (req_delta_lines - 
vsync_line) + 2;
else
start_line = vsync_line - req_delta_lines;
 
-- 
2.7.4

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH 15/16] drm/amd/display: Fix Vega10 lightup on S3 resume

2018-09-26 Thread sunpeng.li
From: Roman Li 

[Why]
There have been a few reports of the Vega10 display remaining blank
after S3 resume. The regression is caused by the workaround for mode
change on Vega10 - skip set_bandwidth if the stream count is 0.
As a result, we skipped the dispclk reset on suspend, so on resume
we may skip the clock update, assuming it hasn't changed.
On some systems this causes a blank display or 'out of range'.

[How]
Revert "drm/amd/display: Fix Vega10 black screen after mode change".
Verified that this does not cause a mode change regression.

Signed-off-by: Roman Li 
Reviewed-by: Sun peng Li 
Acked-by: Leo Li 
---
 drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c |  2 +-
 drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h |  5 -
 drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c | 12 
 3 files changed, 1 insertion(+), 18 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c 
b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
index 6b7..b75ede5 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
@@ -2537,7 +2537,7 @@ static void pplib_apply_display_requirements(
dc->prev_display_config = *pp_display_cfg;
 }
 
-void dce110_set_bandwidth(
+static void dce110_set_bandwidth(
struct dc *dc,
struct dc_state *context,
bool decrease_allowed)
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h 
b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h
index e4c5db7..d6db3db 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h
@@ -68,11 +68,6 @@ void dce110_fill_display_configs(
const struct dc_state *context,
struct dm_pp_display_configuration *pp_display_cfg);
 
-void dce110_set_bandwidth(
-   struct dc *dc,
-   struct dc_state *context,
-   bool decrease_allowed);
-
 uint32_t dce110_get_min_vblank_time_us(const struct dc_state *context);
 
 void dp_receiver_power_ctrl(struct dc_link *link, bool on);
diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c 
b/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c
index 5853522..eb0f5f9 100644
--- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c
@@ -244,17 +244,6 @@ static void dce120_update_dchub(
dh_data->dchub_info_valid = false;
 }
 
-static void dce120_set_bandwidth(
-   struct dc *dc,
-   struct dc_state *context,
-   bool decrease_allowed)
-{
-   if (context->stream_count <= 0)
-   return;
-
-   dce110_set_bandwidth(dc, context, decrease_allowed);
-}
-
 void dce120_hw_sequencer_construct(struct dc *dc)
 {
/* All registers used by dce11.2 match those in dce11 in offset and
@@ -263,6 +252,5 @@ void dce120_hw_sequencer_construct(struct dc *dc)
dce110_hw_sequencer_construct(dc);
dc->hwss.enable_display_power_gating = 
dce120_enable_display_power_gating;
dc->hwss.update_dchub = dce120_update_dchub;
-   dc->hwss.set_bandwidth = dce120_set_bandwidth;
 }
 
-- 
2.7.4

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

