Re: [PATCH] drm/amd/display: fix IPX enablement

2024-03-22 Thread Kazlauskas, Nicholas
[Public]

Reviewed-by: Nicholas Kazlauskas 

Regards,
Nicholas Kazlauskas


From: Mahfooz, Hamza 
Sent: Friday, March 22, 2024 2:56 PM
To: amd-gfx@lists.freedesktop.org 
Cc: Kazlauskas, Nicholas ; Li, Roman 
; Li, Sun peng (Leo) ; Wentland, Harry 
; Deucher, Alexander ; 
Limonciello, Mario ; Siqueira, Rodrigo 
; Mahfooz, Hamza ; Broadworth, 
Mark 
Subject: [PATCH] drm/amd/display: fix IPX enablement

We need to re-enable idle power optimizations after entering PSR, since
we get kicked out of idle power optimizations before entering PSR
(entering PSR requires us to write to DCN registers, which isn't allowed
while we are in IPS).

Fixes: bfe4f0b0e717 ("drm/amd/display: Add more checks for exiting idle in DC")
Tested-by: Mark Broadworth 
Signed-off-by: Hamza Mahfooz 
---
 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c | 8 +---
 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.h | 2 +-
 2 files changed, 6 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
index a48a79e84e82..bfa090432ce2 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
@@ -141,9 +141,8 @@ bool amdgpu_dm_link_setup_psr(struct dc_stream_state 
*stream)
  * amdgpu_dm_psr_enable() - enable psr f/w
  * @stream: stream state
  *
- * Return: true if success
  */
-bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
+void amdgpu_dm_psr_enable(struct dc_stream_state *stream)
 {
 struct dc_link *link = stream->link;
 unsigned int vsync_rate_hz = 0;
@@ -190,7 +189,10 @@ bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
 if (link->psr_settings.psr_version < DC_PSR_VERSION_SU_1)
 power_opt |= psr_power_opt_z10_static_screen;

-   return dc_link_set_psr_allow_active(link, &psr_enable, false, false, 
&power_opt);
+   dc_link_set_psr_allow_active(link, &psr_enable, false, false, 
&power_opt);
+
+   if (link->ctx->dc->caps.ips_support)
+   dc_allow_idle_optimizations(link->ctx->dc, true);
 }

 /*
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.h 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.h
index 6806b3c9c84b..1fdfd183c0d9 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.h
@@ -32,7 +32,7 @@
 #define AMDGPU_DM_PSR_ENTRY_DELAY 5

 void amdgpu_dm_set_psr_caps(struct dc_link *link);
-bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
+void amdgpu_dm_psr_enable(struct dc_stream_state *stream);
 bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
 bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
 bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm);
--
2.44.0



Re: [PATCH 2/6] drm/amd/display: Add DP 2.0 HPO Stream Encoder

2021-08-17 Thread Kazlauskas, Nicholas

On 2021-08-16 4:59 p.m., Fangzhi Zuo wrote:

HW Blocks:

 +--------+   +-------+   +--------+
 |  OPTC  |   |  HDA  |   |  HUBP  |
 +--------+   +-------+   +--------+
     |            |            |
     |            |            |
 HPO |============|============|
     |            v            |
     |         +-----+         |
     |         | APG |         |
     |         +-----+         |
     |            |            |
     v            v            v
 +---------------------------------+
 |       HPO Stream Encoder        |
 +---------------------------------+

Signed-off-by: Fangzhi Zuo 
---
  .../amd/display/dc/dcn10/dcn10_hw_sequencer.c |  35 +
  drivers/gpu/drm/amd/display/dc/dcn31/Makefile |   2 +-
  .../dc/dcn31/dcn31_hpo_dp_stream_encoder.c| 761 ++
  .../dc/dcn31/dcn31_hpo_dp_stream_encoder.h| 241 ++
  .../drm/amd/display/dc/dcn31/dcn31_resource.c |  85 ++
  .../gpu/drm/amd/display/dc/inc/core_types.h   |   4 +
  .../gpu/drm/amd/display/dc/inc/hw/hw_shared.h |   1 +
  .../amd/display/dc/inc/hw/stream_encoder.h|  79 ++
  drivers/gpu/drm/amd/display/dc/inc/resource.h |   4 +
  .../amd/display/include/grph_object_defs.h|  10 +
  .../drm/amd/display/include/grph_object_id.h  |   6 +
  11 files changed, 1227 insertions(+), 1 deletion(-)
  create mode 100644 
drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c
  create mode 100644 
drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.h

diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c 
b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index df8a7718a85f..cffd9e6f44b2 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -466,6 +466,41 @@ void dcn10_log_hw_state(struct dc *dc,
  
  	log_mpc_crc(dc, log_ctx);
  
+#if defined(CONFIG_DRM_AMD_DC_DCN3_1)

+   {
+   int hpo_dp_link_enc_count = 0;
+
+   if (pool->hpo_dp_stream_enc_count > 0) {
+   DTN_INFO("DP HPO S_ENC:  Enabled  OTG   Format   Depth   Vid 
  SDP   Compressed  Link\n");
+   for (i = 0; i < pool->hpo_dp_stream_enc_count; i++) {
+   struct hpo_dp_stream_encoder_state 
hpo_dp_se_state = {0};
+   struct hpo_dp_stream_encoder *hpo_dp_stream_enc = 
pool->hpo_dp_stream_enc[i];
+
+   if (hpo_dp_stream_enc && 
hpo_dp_stream_enc->funcs->read_state) {
+   
hpo_dp_stream_enc->funcs->read_state(hpo_dp_stream_enc, &hpo_dp_se_state);
+
+   DTN_INFO("[%d]: %d%d   
%6s   %d %d %d%d %d\n",
+   hpo_dp_stream_enc->id - 
ENGINE_ID_HPO_DP_0,
+   
hpo_dp_se_state.stream_enc_enabled,
+   
hpo_dp_se_state.otg_inst,
+   (hpo_dp_se_state.pixel_encoding 
== 0) ? "4:4:4" :
+   
((hpo_dp_se_state.pixel_encoding == 1) ? "4:2:2" :
+   
(hpo_dp_se_state.pixel_encoding == 2) ? "4:2:0" : "Y-Only"),
+   
(hpo_dp_se_state.component_depth == 0) ? 6 :
+   
((hpo_dp_se_state.component_depth == 1) ? 8 :
+   
(hpo_dp_se_state.component_depth == 2) ? 10 : 12),
+   
hpo_dp_se_state.vid_stream_enabled,
+   
hpo_dp_se_state.sdp_enabled,
+   
hpo_dp_se_state.compressed_format,
+   
hpo_dp_se_state.mapped_to_link_enc);
+   }
+   }
+
+   DTN_INFO("\n");
+   }
+   }
+#endif
+
DTN_INFO_END();
  }
  
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/Makefile b/drivers/gpu/drm/amd/display/dc/dcn31/Makefile

index bc2087f6dcb2..8b811f589524 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/Makefile
@@ -12,7 +12,7 @@
  
  DCN31 = dcn31_resource.o dcn31_hubbub.o dcn31_hwseq.o dcn31_init.o dcn31_hubp.o \

dcn31_dccg.o dcn31_optc.o dcn31_dio_link_encoder.o dcn31_panel_cntl.o \
-   dcn31_apg.o
+   dcn31_apg.o dcn31_hpo_dp_stream_encoder.o
  
  ifdef CONFIG_X86

  CFLAGS_$(AMDDALPATH)/dc/dcn31/dcn31_resource.o := -msse
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_enco

Re: [PATCH v3 5/6] drm/amd/display: Add DP 2.0 BIOS and DMUB Support

2021-08-20 Thread Kazlauskas, Nicholas

On 2021-08-19 2:58 p.m., Fangzhi Zuo wrote:

Parse DP2 encoder caps and hpo instance from bios

Signed-off-by: Fangzhi Zuo 
---
  drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c | 10 ++
  drivers/gpu/drm/amd/display/dc/bios/command_table2.c   | 10 ++
  .../drm/amd/display/dc/dcn30/dcn30_dio_link_encoder.c  |  4 
  drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h   |  6 ++
  drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h|  4 
  .../gpu/drm/amd/display/include/bios_parser_types.h| 10 ++
  drivers/gpu/drm/amd/include/atomfirmware.h |  6 ++
  7 files changed, 50 insertions(+)

diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c 
b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
index 6dbde74c1e06..cdb5c027411a 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
@@ -1604,6 +1604,16 @@ static enum bp_result bios_parser_get_encoder_cap_info(
ATOM_ENCODER_CAP_RECORD_HBR3_EN) ? 1 : 0;
info->HDMI_6GB_EN = (record->encodercaps &
ATOM_ENCODER_CAP_RECORD_HDMI6Gbps_EN) ? 1 : 0;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+   info->IS_DP2_CAPABLE = (record->encodercaps &
+   ATOM_ENCODER_CAP_RECORD_DP2) ? 1 : 0;
+   info->DP_UHBR10_EN = (record->encodercaps &
+   ATOM_ENCODER_CAP_RECORD_UHBR10_EN) ? 1 : 0;
+   info->DP_UHBR13_5_EN = (record->encodercaps &
+   ATOM_ENCODER_CAP_RECORD_UHBR13_5_EN) ? 1 : 0;
+   info->DP_UHBR20_EN = (record->encodercaps &
+   ATOM_ENCODER_CAP_RECORD_UHBR20_EN) ? 1 : 0;
+#endif
info->DP_IS_USB_C = (record->encodercaps &
ATOM_ENCODER_CAP_RECORD_USB_C_TYPE) ? 1 : 0;
  
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c

index f1f672a997d7..6e333b4af7d6 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
@@ -340,6 +340,13 @@ static enum bp_result transmitter_control_v1_7(
const struct command_table_helper *cmd = bp->cmd_helper;
struct dmub_dig_transmitter_control_data_v1_7 dig_v1_7 = {0};
  
+#if defined(CONFIG_DRM_AMD_DC_DCN)

+   uint8_t hpo_instance = (uint8_t)cntl->hpo_engine_id - ENGINE_ID_HPO_0;
+
+   if (dc_is_dp_signal(cntl->signal))
+   hpo_instance = (uint8_t)cntl->hpo_engine_id - 
ENGINE_ID_HPO_DP_0;
+#endif
+
dig_v1_7.phyid = cmd->phy_id_to_atom(cntl->transmitter);
dig_v1_7.action = (uint8_t)cntl->action;
  
@@ -353,6 +360,9 @@ static enum bp_result transmitter_control_v1_7(

dig_v1_7.hpdsel = cmd->hpd_sel_to_atom(cntl->hpd_sel);
dig_v1_7.digfe_sel = cmd->dig_encoder_sel_to_atom(cntl->engine_id);
dig_v1_7.connobj_id = (uint8_t)cntl->connector_obj_id.id;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+   dig_v1_7.HPO_instance = hpo_instance;
+#endif
dig_v1_7.symclk_units.symclk_10khz = cntl->pixel_clock/10;
  
  	if (cntl->action == TRANSMITTER_CONTROL_ENABLE ||

diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_link_encoder.c 
b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_link_encoder.c
index 46ea39f5ef8d..6f3c2fb60790 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_link_encoder.c
@@ -192,6 +192,10 @@ void dcn30_link_encoder_construct(
enc10->base.features.flags.bits.IS_HBR3_CAPABLE =
bp_cap_info.DP_HBR3_EN;
enc10->base.features.flags.bits.HDMI_6GB_EN = 
bp_cap_info.HDMI_6GB_EN;
+   enc10->base.features.flags.bits.IS_DP2_CAPABLE = 
bp_cap_info.IS_DP2_CAPABLE;
+   enc10->base.features.flags.bits.IS_UHBR10_CAPABLE = 
bp_cap_info.DP_UHBR10_EN;
+   enc10->base.features.flags.bits.IS_UHBR13_5_CAPABLE = 
bp_cap_info.DP_UHBR13_5_EN;
+   enc10->base.features.flags.bits.IS_UHBR20_CAPABLE = 
bp_cap_info.DP_UHBR20_EN;


Please drop the DCN guards around this section - don't want to modify 
the bit field structure based on DCN vs DCE only.



enc10->base.features.flags.bits.DP_IS_USB_C =
bp_cap_info.DP_IS_USB_C;
} else {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h 
b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
index fa3a725e11dc..b99efcf4712f 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
@@ -59,6 +59,12 @@ struct encoder_feature_support {
uint32_t IS_TPS3_CAPABLE:1;
uint32_t IS_TPS4_CAPABLE:1;
uint32_t HDMI_6GB_EN:1;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+   uint32_t IS_DP2_CAPABLE:1;
+   

Re: [PATCH v2] drm/amd/display: Fix two cursor duplication when using overlay

2021-08-24 Thread Kazlauskas, Nicholas

On 2021-08-24 9:59 a.m., Simon Ser wrote:

Hi Rodrigo!

Thanks a lot for your reply! Comments below, please bear with me: I'm
a bit familiar with the cursor issues, but my knowledge of AMD hw is
still severely lacking.

On Wednesday, August 18th, 2021 at 15:18, Rodrigo Siqueira 
 wrote:


On 08/18, Simon Ser wrote:

Hm. This patch causes a regression for me. I was using primary + overlay
not covering the whole primary plane + cursor before. This patch breaks it.


Which branch are you using? Recently, I reverted part of that patch,
see:

   Revert "drm/amd/display: Fix overlay validation by considering cursors"


Right. This revert actually makes things worse. Prior to the revert the
overlay could be enabled without the cursor. With the revert the overlay
cannot be enabled at all, even if the cursor is disabled.


This patch makes the overlay plane very useless for me, because the primary
plane is always under the overlay plane.


I'm curious about your use case with overlay planes. Could you help me
to understand it better? If possible, describe:

1. Context and scenario
2. Compositor
3. Kernel version
4. If you know which IGT test describe your test?

I'm investigating overlay issues in our driver, and a userspace
perspective might help me.


I'm working on gamescope [1], Valve's gaming compositor. Our use-cases include
displaying (from bottom to top) a game in the background, a notification popup
over it in the overlay plane, and a cursor in the cursor plane. All of the
planes might be rotated. The game's buffer might be scaled and might not cover
the whole CRTC.

libliftoff [2] is used to provide vendor-agnostic KMS plane offload. In other
words, I'd prefer to avoid relying too much on hardware specific details, e.g.
I'd prefer to avoid hole-punching via a underlay (it might work on AMD hw, but
will fail on many other drivers).


Hi Simon,

Siqueira explained a bit below, but the problem is that we don't have 
dedicated cursor planes in hardware.


It's easiest to understand the hardware cursor as being constrained within 
the DRM plane specifications. Each DRM plane maps to 1 (or 2) hardware 
pipes and the cursor has to be drawn along with it. The cursor will 
inherit the scale, bounds, and color management associated with the 
underlying pipes.


From the kernel display driver perspective that makes things quite 
difficult with the existing DRM API - we can only really guarantee you 
get HW cursor when the framebuffer covers the entire screen and it is 
unscaled or matches the scaling expected by the user.


Hole punching generally satisfies both of these since it's a transparent 
framebuffer that covers the entire screen.


The case that's slightly more complicated is when the overlay doesn't 
cover the entire screen but the primary plane does. We can still enable 
the cursor if the primary plane and overlay have a matching scale and 
color management - our display hardware can draw the cursor on multiple 
pipes. (Note: this statement only applies for DCN2.1+)


If the overlay plane does not cover the entire screen and the scale or 
the color management differs then we cannot enable the HW cursor plane. 
As you mouse over the bounds of the overlay you will see the cursor 
drawn differently on the primary and overlay pipe.


If the overlay plane and primary plane do not cover the entire screen 
then you will lose HW cursor outside of the union of their bounds.
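
As a rough sketch of the scale constraint in code (a simplification for 
illustration only, not the actual driver check - compare it with the 
dm_check_crtc_cursor() change further down this archive):

    /* Uses struct drm_plane_state from <drm/drm_plane.h>; src_* values are
     * 16.16 fixed point while crtc_* values are integer pixels. */
    static bool cursor_scale_matches(const struct drm_plane_state *cursor,
                                     const struct drm_plane_state *underlying)
    {
            int cursor_scale_w = cursor->crtc_w * 1000 / (cursor->src_w >> 16);
            int cursor_scale_h = cursor->crtc_h * 1000 / (cursor->src_h >> 16);
            int under_scale_w = underlying->crtc_w * 1000 / (underlying->src_w >> 16);
            int under_scale_h = underlying->crtc_h * 1000 / (underlying->src_h >> 16);

            /* The cursor is drawn by the pipe(s) backing the plane it overlaps,
             * so its effective scale must match that plane's scale. */
            return cursor_scale_w == under_scale_w &&
                   cursor_scale_h == under_scale_h;
    }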


Correct me if I'm wrong, but I think your usecase [1] falls under the 
category where:

1. Primary plane covers entire screen
2. Overlay plane does not cover the entire screen
3. Overlay plane is scaled

This isn't a supported configuration because the HW cursor cannot be drawn in 
the same position on both pipes.


I think you can see a similar usecase to [1] on Windows, but the 
difference is that the cursor is drawn on the "primary plane" instead of 
on top of the primary and overlay. I don't remember if DRM has a 
requirement that the cursor plane must be topmost, but we can't enable 
[1] as long as it is.


I don't know if you have more usecases in mind than [1], but just as 
some general recommendations I think you should only really use overlays 
when they fall under one of two categories:


1. You want to save power:

You will burn additional power for the overlay pipe.

But you will save power in use cases like video playback - where the 
decoder produces the framebuffer and we can avoid a shader composited 
copy with its associated GFX engine overhead and memory traffic.


2. You want more performance:

You will burn additional power for the overlay pipe.

On bandwidth constrained systems you can save significant memory 
bandwidth by avoiding the shader composition by allowing for direct 
scanout of game or other application buffers.


Your usecase [1] falls under this category, but as an aside I discourage 
trying to design usecases where the compositor requires the overlay for 
functional purposes.


Best regards,
Nicholas Kazlauskas




Re: [PATCH 3/7] drm/amd/display: Use vblank control events for PSR enable/disable

2021-09-07 Thread Kazlauskas, Nicholas

On 2021-09-04 10:36 a.m., Mike Lothian wrote:

Hi

This patch is causing issues on my PRIME system

I've opened 
https://gitlab.freedesktop.org/drm/amd/-/issues/1700 to track

Cheers

Mike


We don't create the workqueue on headless configs so I guess all the 
instances of flush need to be guarded with NULL checks first.
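
Something like the following is probably the shape of the fix (just a 
sketch under the assumption that the workqueue pointer is simply NULL on 
headless configs, not a tested patch):

    /* dm->vblank_control_workqueue is not created on headless configs,
     * so every flush needs a NULL guard. */
    if (dm->vblank_control_workqueue)
            flush_workqueue(dm->vblank_control_workqueue);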


Thanks for reporting this!

Regards,
Nicholas Kazlauskas




On Fri, 13 Aug 2021 at 07:35, Wayne Lin  wrote:


From: Nicholas Kazlauskas 

[Why]
PSR can disable the HUBP along with the OTG when PSR is active.

We'll hit a pageflip timeout when the OTG is disabled because we're no
longer updating the CRTC vblank counter and the pflip high IRQ will
not fire on the flip.

To prevent the page flip timeout from occurring we should modify the
enter/exit conditions to match DRM requirements.

[How]
Use our deferred handlers for DRM vblank control to notify DMCU(B)
when it can enable or disable PSR based on whether vblank is disabled or
enabled respectively.

We'll need to pass along the stream with the notification now because
we want to access the CRTC state while the CRTC is locked to get the
stream state prior to the commit.

Retain a reference to the stream so it remains safe to continue to
access and release that reference once we're done with it.

Enable/disable logic follows what we were previously doing in
update_planes.

The workqueue has to be flushed before programming streams or planes
to ensure that we exit out of idle optimizations and PSR before
these events occur if necessary.

Keeping the skip count logic the same (to avoid FBCON PSR enablement)
requires copying the allow condition onto the DM IRQ parameters - a
field that we can actually access from the worker.

Reviewed-by: Roman Li 
Acked-by: Wayne Lin 
Signed-off-by: Nicholas Kazlauskas 
---
  .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 48 +++
  .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h |  2 +
  .../display/amdgpu_dm/amdgpu_dm_irq_params.h  |  1 +
  3 files changed, 43 insertions(+), 8 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index f88b6c5b83cd..cebd663b6708 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -1061,7 +1061,22 @@ static void vblank_control_worker(struct work_struct 
*work)

 DRM_DEBUG_KMS("Allow idle optimizations (MALL): %d\n", 
dm->active_vblank_irq_count == 0);

+   /* Control PSR based on vblank requirements from OS */
+   if (vblank_work->stream && vblank_work->stream->link) {
+   if (vblank_work->enable) {
+   if 
(vblank_work->stream->link->psr_settings.psr_allow_active)
+   amdgpu_dm_psr_disable(vblank_work->stream);
+   } else if (vblank_work->stream->link->psr_settings.psr_feature_enabled 
&&
+  !vblank_work->stream->link->psr_settings.psr_allow_active 
&&
+  vblank_work->acrtc->dm_irq_params.allow_psr_entry) {
+   amdgpu_dm_psr_enable(vblank_work->stream);
+   }
+   }
+
 mutex_unlock(&dm->dc_lock);
+
+   dc_stream_release(vblank_work->stream);
+
 kfree(vblank_work);
  }

@@ -6018,6 +6033,11 @@ static inline int dm_set_vblank(struct drm_crtc *crtc, 
bool enable)
 work->acrtc = acrtc;
 work->enable = enable;

+   if (acrtc_state->stream) {
+   dc_stream_retain(acrtc_state->stream);
+   work->stream = acrtc_state->stream;
+   }
+
 queue_work(dm->vblank_control_workqueue, &work->work);
  #endif

@@ -8623,6 +8643,12 @@ static void amdgpu_dm_commit_planes(struct 
drm_atomic_state *state,
 /* Update the planes if changed or disable if we don't have any. */
 if ((planes_count || acrtc_state->active_planes == 0) &&
 acrtc_state->stream) {
+   /*
+* If PSR or idle optimizations are enabled then flush out
+* any pending work before hardware programming.
+*/
+   flush_workqueue(dm->vblank_control_workqueue);
+
 bundle->stream_update.stream = acrtc_state->stream;
 if (new_pcrtc_state->mode_changed) {
 bundle->stream_update.src = acrtc_state->stream->src;
@@ -8691,16 +8717,20 @@ static void amdgpu_dm_commit_planes(struct 
drm_atomic_state *state,
 acrtc_state->stream->link->psr_settings.psr_version != 
DC_PSR_VERSION_UNSUPPORTED &&
   

Re: [PATCH] amd/display: downgrade validation failure log level

2021-09-07 Thread Kazlauskas, Nicholas

On 2021-09-07 10:19 a.m., Simon Ser wrote:

In amdgpu_dm_atomic_check, dc_validate_global_state is called. On
failure this logs a warning to the kernel journal. However warnings
shouldn't be used for atomic test-only commit failures: user-space
might be performing a lot of atomic test-only commits to find the
best hardware configuration.

Downgrade the log to a regular DRM atomic message. While at it, use
the new device-aware logging infrastructure.

This fixes error messages in the kernel when running gamescope [1].

[1]: https://github.com/Plagman/gamescope/issues/245

Signed-off-by: Simon Ser 
Cc: Alex Deucher 
Cc: Harry Wentland 
Cc: Nicholas Kazlauskas 


Makes sense since validation can fail. Thanks for the patch!

Reviewed-by: Nicholas Kazlauskas 

Regards,
Nicholas Kazlauskas


---
  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 3 ++-
  1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 986c9d29d686..6f3b6f2a952c 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -10467,7 +10467,8 @@ static int amdgpu_dm_atomic_check(struct drm_device 
*dev,
goto fail;
status = dc_validate_global_state(dc, dm_state->context, false);
if (status != DC_OK) {
-   DC_LOG_WARNING("DC global validation failure: %s (%d)",
+   drm_dbg_atomic(dev,
+  "DC global validation failure: %s (%d)",
   dc_status_to_str(status), status);
ret = -EINVAL;
goto fail;





Re: [PATCH] drm/amd/display: Fix white screen page fault for gpuvm

2021-09-13 Thread Kazlauskas, Nicholas

On 2021-09-13 3:13 p.m., Alex Deucher wrote:

Acked-by: Alex Deucher 

Can you add a fixes: tag?

Alex


Sure, I think the relevant patch is:

Fixes: 64b1d0e8d50 ("drm/amd/display: Add DCN3.1 HWSEQ")

Regards,
Nicholas Kazlauskas



On Mon, Sep 13, 2021 at 3:11 PM Nicholas Kazlauskas
 wrote:


[Why]
The "base_addr_is_mc_addr" field was added for dcn3.1 support but
pa_config was never updated to set it to false.

Uninitialized memory causes it to be set to true which results in
address mistranslation and white screen.

[How]
Use memset to ensure all fields are initialized to 0 by default.

Cc: Aaron Liu 
Signed-off-by: Nicholas Kazlauskas 
---
  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 2 ++
  1 file changed, 2 insertions(+)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 53363728dbb..b0426bb3f2e 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -1125,6 +1125,8 @@ static void mmhub_read_system_context(struct 
amdgpu_device *adev, struct dc_phy_
 uint32_t agp_base, agp_bot, agp_top;
 PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;

+   memset(pa_config, 0, sizeof(*pa_config));
+
 logical_addr_low  = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
 pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);

--
2.25.1





Re: [PATCH] drm/amd/display: Enable PSR by default on DCN3.1

2021-10-12 Thread Kazlauskas, Nicholas

On 2021-10-11 1:04 a.m., Vishwakarma, Pratik wrote:


On 10/8/2021 9:44 PM, Nicholas Kazlauskas wrote:

[Why]
New idle optimizations for DCN3.1 require PSR for optimal power savings
on panels that support it.

This was previously left disabled by default because of issues with
compositors that do not pageflip and scan out directly to the
frontbuffer.

For these compositors we now have detection methods that wait for x
number of pageflips after a full update - triggered by a buffer or
format change typically.

This may introduce bugs or new cases not tested by users so this is
only currently targeting DCN31.

[How]
Add code in DM to set PSR state by default for DCN3.1 while falling
back to the feature mask for older DCN.

Add a global debug flag that can be set to disable it for either.

Cc: Harry Wentland
Cc: Roman Li
Signed-off-by: Nicholas Kazlauskas
---
  .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c   | 17 -
  drivers/gpu/drm/amd/include/amd_shared.h|  5 +++--
  2 files changed, 19 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index dc595ecec595..ff545503a6ed 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -4031,6 +4031,7 @@ static int amdgpu_dm_initialize_drm_device(struct 
amdgpu_device *adev)
int32_t primary_planes;
enum dc_connection_type new_connection_type = dc_connection_none;
const struct dc_plane_cap *plane;
+   bool psr_feature_enabled = false;
  
  	dm->display_indexes_num = dm->dc->caps.max_streams;

/* Update the actual used number of crtc */
@@ -4113,6 +4114,19 @@ static int amdgpu_dm_initialize_drm_device(struct 
amdgpu_device *adev)
DRM_DEBUG_KMS("Unsupported DCN IP version for outbox: 0x%X\n",
  adev->ip_versions[DCE_HWIP][0]);
}
+
+   /* Determine whether to enable PSR support by default. */
+   if (!(amdgpu_dc_debug_mask & DC_DISABLE_PSR)) {
+   switch (adev->ip_versions[DCE_HWIP][0]) {
+   case IP_VERSION(3, 1, 2):
+   case IP_VERSION(3, 1, 3):
+   psr_feature_enabled = true;
+   break;
+   default:
+   psr_feature_enabled = amdgpu_dc_feature_mask & 
DC_PSR_MASK;
+   break;
+   }
+   }
  #endif
  
  	/* loops over all connectors on the board */

@@ -4156,7 +4170,8 @@ static int amdgpu_dm_initialize_drm_device(struct 
amdgpu_device *adev)
} else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
amdgpu_dm_update_connector_after_detect(aconnector);
register_backlight_device(dm, link);
-   if (amdgpu_dc_feature_mask & DC_PSR_MASK)
+
+   if (psr_feature_enabled)
amdgpu_dm_set_psr_caps(link);
}
  
diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h

index 257f280d3d53..f1a46d16f7ea 100644
--- a/drivers/gpu/drm/amd/include/amd_shared.h
+++ b/drivers/gpu/drm/amd/include/amd_shared.h
@@ -228,7 +228,7 @@ enum DC_FEATURE_MASK {
DC_FBC_MASK = (1 << 0), //0x1, disabled by default
DC_MULTI_MON_PP_MCLK_SWITCH_MASK = (1 << 1), //0x2, enabled by default
DC_DISABLE_FRACTIONAL_PWM_MASK = (1 << 2), //0x4, disabled by default
-   DC_PSR_MASK = (1 << 3), //0x8, disabled by default
+   DC_PSR_MASK = (1 << 3), //0x8, disabled by default for dcn < 3.1
DC_EDP_NO_POWER_SEQUENCING = (1 << 4), //0x10, disabled by default
  };
  
@@ -236,7 +236,8 @@ enum DC_DEBUG_MASK {

DC_DISABLE_PIPE_SPLIT = 0x1,
DC_DISABLE_STUTTER = 0x2,
DC_DISABLE_DSC = 0x4,
-   DC_DISABLE_CLOCK_GATING = 0x8
+   DC_DISABLE_CLOCK_GATING = 0x8,
+   DC_DISABLE_PSR = 0x10,


Don't we need a corresponding check in amdgpu_dm_init() to disable PSR 
at runtime?


The check is `if (psr_feature_enabled)` above.


Also, how does it handle conflicting declarations from feature mask and 
debug mask?


The feature enable mask is used for older ASICs to allow PSR to be enabled.

For both old and new ASICs the DISABLE mask takes priority as a debug 
option for disabling PSR support.
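
Condensed into pseudo-code, the policy from the patch above is roughly 
(sketch only - `dcn31_asic` stands in for the IP version switch in the 
patch):

    bool psr_feature_enabled = false;

    if (!(amdgpu_dc_debug_mask & DC_DISABLE_PSR)) {
            /* DCN3.1 (IP 3.1.2 / 3.1.3) enables PSR by default... */
            if (dcn31_asic)
                    psr_feature_enabled = true;
            else    /* ...older ASICs still rely on the feature mask. */
                    psr_feature_enabled = amdgpu_dc_feature_mask & DC_PSR_MASK;
    }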


Regards,
Nicholas Kazlauskas



/BR
/

/Pratik
/


  };
  
  enum amd_dpm_forced_level;




Re: [PATCH 16/27] drm/amd/display: increase Z9 latency to workaround underflow in Z9

2021-10-18 Thread Kazlauskas, Nicholas

On 2021-10-15 7:53 p.m., Mike Lothian wrote:

This patch seems to change z8 - not that I know what z8 or z9 are


It's a little misleading, but the patch and terminology are correct.

Z9 is the usecase for these watermarks even if the calculation is shared 
with Z8/Z9.


Regards,
Nicholas Kazlauskas



On Fri, 15 Oct 2021 at 19:44, Agustin Gutierrez
 wrote:


From: Eric Yang 

[Why]
Z9 latency is higher than when we originally tuned the watermark
parameters, causing underflow. Increasing the value until the latency
issues is resolved.

Reviewed-by: Nicholas Kazlauskas 
Acked-by: Agustin Gutierrez Sanchez 
Signed-off-by: Eric Yang 
---
  drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c | 4 ++--
  1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c 
b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
index c9d3d691f4c6..12ebd9f8912f 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
@@ -222,8 +222,8 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_1_soc = {
 .num_states = 5,
 .sr_exit_time_us = 9.0,
 .sr_enter_plus_exit_time_us = 11.0,
-   .sr_exit_z8_time_us = 402.0,
-   .sr_enter_plus_exit_z8_time_us = 520.0,
+   .sr_exit_z8_time_us = 442.0,
+   .sr_enter_plus_exit_z8_time_us = 560.0,
 .writeback_latency_us = 12.0,
 .dram_channel_width_bytes = 4,
 .round_trip_ping_latency_dcfclk_cycles = 106,
--
2.25.1





Re: [PATCH] drm/amdgpu/display: add yellow carp B0 with rest of driver

2021-10-20 Thread Kazlauskas, Nicholas

On 2021-10-20 9:53 a.m., Alex Deucher wrote:

Fix revision id.

Fixes: 626cbb641f1052 ("drm/amdgpu: support B0&B1 external revision id for yellow 
carp")
Signed-off-by: Alex Deucher 


Reviewed-by: Nicholas Kazlauskas 

Regards,
Nicholas Kazlauskas


---
  drivers/gpu/drm/amd/display/include/dal_asic_id.h | 2 +-
  1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/display/include/dal_asic_id.h 
b/drivers/gpu/drm/amd/display/include/dal_asic_id.h
index a9974f12f7fb..e4a2dfacab4c 100644
--- a/drivers/gpu/drm/amd/display/include/dal_asic_id.h
+++ b/drivers/gpu/drm/amd/display/include/dal_asic_id.h
@@ -228,7 +228,7 @@ enum {
  #define FAMILY_YELLOW_CARP 146
  
  #define YELLOW_CARP_A0 0x01

-#define YELLOW_CARP_B0 0x1A
+#define YELLOW_CARP_B0 0x20
  #define YELLOW_CARP_UNKNOWN 0xFF
  
  #ifndef ASICREV_IS_YELLOW_CARP






Re: [PATCH 32/33] drm/amd/display: fix link training regression for 1 or 2 lane

2021-10-25 Thread Kazlauskas, Nicholas

On 2021-10-25 9:58 a.m., Harry Wentland wrote:



On 2021-10-25 07:25, Paul Menzel wrote:

Dear Wenjing, dear Rodrigo,


On 24.10.21 15:31, Rodrigo Siqueira wrote:

From: Wenjing Liu 

[why]
We have a regression that causes maximize lane settings to use
uninitialized data from unused lanes.


Which commit caused the regression? Please amend the commit message.


This will cause link training to fail for 1 or 2 lanes because the lane
adjust is populated incorrectly sometimes.


On what card did you test this, and how can it be reproduced?

Please describe the fix/implementation in the commit message.


Reviewed-by: Eric Yang 
Acked-by: Rodrigo Siqueira 
Signed-off-by: Wenjing Liu 
---
   .../gpu/drm/amd/display/dc/core/dc_link_dp.c  | 35 +--
   1 file changed, 32 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c 
b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index 653279ab96f4..f6ba7c734f54 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -108,6 +108,9 @@ static struct dc_link_settings 
get_common_supported_link_settings(
   struct dc_link_settings link_setting_b);
   static void maximize_lane_settings(const struct link_training_settings 
*lt_settings,
   struct dc_lane_settings lane_settings[LANE_COUNT_DP_MAX]);
+static void override_lane_settings(const struct link_training_settings 
*lt_settings,
+    struct dc_lane_settings lane_settings[LANE_COUNT_DP_MAX]);
+
   static uint32_t get_cr_training_aux_rd_interval(struct dc_link *link,
   const struct dc_link_settings *link_settings)
   {
@@ -734,15 +737,13 @@ void dp_decide_lane_settings(
   }
   #endif
   }
-
-    /* we find the maximum of the requested settings across all lanes*/
-    /* and set this maximum for all lanes*/
   dp_hw_to_dpcd_lane_settings(lt_settings, hw_lane_settings, 
dpcd_lane_settings);
     if (lt_settings->disallow_per_lane_settings) {
   /* we find the maximum of the requested settings across all lanes*/
   /* and set this maximum for all lanes*/
   maximize_lane_settings(lt_settings, hw_lane_settings);
+    override_lane_settings(lt_settings, hw_lane_settings);
     if (lt_settings->always_match_dpcd_with_hw_lane_settings)
   dp_hw_to_dpcd_lane_settings(lt_settings, hw_lane_settings, 
dpcd_lane_settings);
@@ -833,6 +834,34 @@ static void maximize_lane_settings(const struct 
link_training_settings *lt_setti
   }
   }
   +static void override_lane_settings(const struct link_training_settings 
*lt_settings,
+    struct dc_lane_settings lane_settings[LANE_COUNT_DP_MAX])
+{
+    uint32_t lane;
+
+    if (lt_settings->voltage_swing == NULL &&
+    lt_settings->pre_emphasis == NULL &&
+#if defined(CONFIG_DRM_AMD_DC_DP2_0)
+    lt_settings->ffe_preset == NULL &&
+#endif
+    lt_settings->post_cursor2 == NULL)
+
+    return;
+
+    for (lane = 1; lane < LANE_COUNT_DP_MAX; lane++) {
+    if (lt_settings->voltage_swing)
+    lane_settings[lane].VOLTAGE_SWING = *lt_settings->voltage_swing;
+    if (lt_settings->pre_emphasis)
+    lane_settings[lane].PRE_EMPHASIS = *lt_settings->pre_emphasis;
+    if (lt_settings->post_cursor2)
+    lane_settings[lane].POST_CURSOR2 = *lt_settings->post_cursor2;
+#if defined(CONFIG_DRM_AMD_DC_DP2_0)
+    if (lt_settings->ffe_preset)
+    lane_settings[lane].FFE_PRESET = *lt_settings->ffe_preset;
+#endif


Normally these checks should be done in C and not the preprocessor. `if 
CONFIG(DRM_AMD_DC_DP2_0)` or similar should work.



Interesting. I've never seen this before. Do you have an example or link to a 
doc? A cursory search doesn't yield any results but I might not be searching 
for the right thing.

Harry


I'm curious about this too. The compiler with optimizations should 
remove the constant check, but technically the C standard only permits 
it - it doesn't guarantee that it happens.
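
For reference, the kernel-side spelling of that idiom is IS_ENABLED(); a 
minimal sketch, assuming the symbol were a real Kconfig option (which, as 
noted below, it isn't):

    /* IS_ENABLED() evaluates to a compile-time constant, so the dead
     * branch is dropped while the code still gets type-checked. */
    if (IS_ENABLED(CONFIG_DRM_AMD_DC_DP2_0) && lt_settings->ffe_preset)
            lane_settings[lane].FFE_PRESET = *lt_settings->ffe_preset;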


However, this patch should actually be changed to drop these 
CONFIG_DRM_AMD_DC_DP2_0 guards - this isn't a Kconfig option nor will 
there be one specifically for DP2. This should be folded under the DCN 
support.


Regards,
Nicholas Kazlauskas




+    }
+}
+
   enum dc_status dp_get_lane_status_and_lane_adjust(
   struct dc_link *link,
   const struct link_training_settings *link_training_setting,




Kind regards,

Paul






Re: [PATCH] drm/amd/display: Fix error handling on waiting for completion

2021-10-26 Thread Kazlauskas, Nicholas

On 2021-10-26 7:07 a.m., Stylon Wang wrote:

[Why]
In GNOME Settings->Display the switching from mirror mode to single display
occasionally causes wait_for_completion_interruptible_timeout() to return
-ERESTARTSYS and fails atomic check.

[How]
Replace the call with wait_for_completion_timeout() since the waiting for
hw_done and flip_done completion doesn't need to worry about interruption
from signal.

Signed-off-by: Stylon Wang 


I think this is okay, but I'll write out how I think these work here in 
case anyone has corrections.


Both variants allow the thread to sleep, but the interruptible variant 
can wake up due to signals. These signals are a secondary wakeup event and 
would require us to restart the wait and (probably) keep track of how 
long we were waiting before.
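
As an illustration (not something in the patch), keeping the interruptible 
variant would mean bookkeeping roughly like this:

    unsigned long deadline = jiffies + 10 * HZ;
    long ret;

    /* Restart the wait on signal delivery and account for the time
     * already spent waiting. */
    do {
            ret = wait_for_completion_interruptible_timeout(&commit->hw_done,
                                                            deadline - jiffies);
    } while (ret == -ERESTARTSYS && time_before(jiffies, deadline));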


We want wakeup only on completion, so we should be using the 
`wait_for_completion_timeout()` variants instead in most (if not all?) 
cases in our display driver.


This probably has some nuances that matter more for different variants 
of UAPI, but with this understanding I think this is:


Reviewed-by: Nicholas Kazlauskas 

Now, if we could revive that patch series I had from the other year and 
outright drop `do_aquire_global_lock()`...


Regards,
Nicholas Kazlauskas


---
  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 4 ++--
  1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 4cd64529b180..b8f4ff323de1 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -9844,10 +9844,10 @@ static int do_aquire_global_lock(struct drm_device *dev,
 * Make sure all pending HW programming completed and
 * page flips done
 */
-   ret = 
wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
+   ret = wait_for_completion_timeout(&commit->hw_done, 10*HZ);
  
  		if (ret > 0)

-   ret = wait_for_completion_interruptible_timeout(
+   ret = wait_for_completion_timeout(
&commit->flip_done, 10*HZ);
  
  		if (ret == 0)






Re: [PATCH] drm/amd/display: Fix error handling on waiting for completion

2021-10-26 Thread Kazlauskas, Nicholas

On 2021-10-26 11:51 a.m., Michel Dänzer wrote:

On 2021-10-26 13:07, Stylon Wang wrote:

[Why]
In GNOME Settings->Display the switching from mirror mode to single display
occasionally causes wait_for_completion_interruptible_timeout() to return
-ERESTARTSYS and fails atomic check.

[How]
Replace the call with wait_for_completion_timeout() since the waiting for
hw_done and flip_done completion doesn't need to worry about interruption
from signal.

Signed-off-by: Stylon Wang 
---
  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 4 ++--
  1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 4cd64529b180..b8f4ff323de1 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -9844,10 +9844,10 @@ static int do_aquire_global_lock(struct drm_device *dev,
 * Make sure all pending HW programming completed and
 * page flips done
 */
-   ret = 
wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
+   ret = wait_for_completion_timeout(&commit->hw_done, 10*HZ);
  
  		if (ret > 0)

-   ret = wait_for_completion_interruptible_timeout(
+   ret = wait_for_completion_timeout(
&commit->flip_done, 10*HZ);
  
  		if (ret == 0)




The *_interruptible_* variant is needed so that the display manager process can 
be killed while it's waiting here, which could take up to 10 seconds (per the 
timeout).

What's the problem with -ERESTARTSYS? Either the ioctl should be restarted 
automatically, or if it bounces back to user space, that needs to be able to 
retry the ioctl while it returns -1 and errno == EINTR. drmIoctl handles this 
transparently.




Thanks for the insight Michel!

If it's just an error in the log without a functional issue then maybe 
we should downgrade it to a debug statement in the case where it returns 
-ERESTARTSYS.


If this is a functional issue (DRM not automatically retrying the 
commit?) then maybe we should take a deeper look into the IOCTL itself.
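
For reference, the transparent retry in drmIoctl() that Michel mentions is 
roughly the following (paraphrased from memory, not a verbatim copy of 
libdrm):

    #include <sys/ioctl.h>
    #include <errno.h>

    static int drm_ioctl_retrying(int fd, unsigned long request, void *arg)
    {
            int ret;

            /* Retry whenever the ioctl is interrupted by a signal. */
            do {
                    ret = ioctl(fd, request, arg);
            } while (ret == -1 && (errno == EINTR || errno == EAGAIN));

            return ret;
    }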


Regards,
Nicholas Kazlauskas



Re: [PATCH] drm/amdgpu/display: fix build when CONFIG_DRM_AMD_DC_DCN is not set

2021-10-28 Thread Kazlauskas, Nicholas

On 2021-10-28 10:46 a.m., Alex Deucher wrote:

ping

On Wed, Oct 27, 2021 at 6:40 PM Alex Deucher  wrote:


Need to guard some things with CONFIG_DRM_AMD_DC_DCN.

Fixes: 707021dc0e16f6 ("drm/amd/display: Enable dpia in dmub only for DCN31 B0")
Signed-off-by: Alex Deucher 


Reviewed-by: Nicholas Kazlauskas 

Though this whole function could be guarded by DCN, since DMUB doesn't exist 
on DCE.


Regards,
Nicholas Kazlauskas


---
  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 2 ++
  1 file changed, 2 insertions(+)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 3f36dbb2c663..6dd6262f2769 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -1108,7 +1108,9 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev)
 case CHIP_YELLOW_CARP:
 if (dc->ctx->asic_id.hw_internal_rev != YELLOW_CARP_A0) {
 hw_params.dpia_supported = true;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
 hw_params.disable_dpia = 
dc->debug.dpia_debug.bits.disable_dpia;
+#endif
 }
 break;
 default:
--
2.31.1





Re: [PATCH] drm/amdgpu/display: fix build when CONFIG_DRM_AMD_DC_DCN is not set

2021-10-28 Thread Kazlauskas, Nicholas

On 2021-10-28 10:46 a.m., Alex Deucher wrote:

Ping

On Wed, Oct 27, 2021 at 6:40 PM Alex Deucher  wrote:


Need to guard some things with CONFIG_DRM_AMD_DC_DCN.

Fixes: 0c865d1d817b77 ("drm/amd/display: fix link training regression for 1 or 2 
lane")
Signed-off-by: Alex Deucher 


Reviewed-by: Nicholas Kazlauskas 

Regards,
Nicholas Kazlauskas


---
  drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c | 11 +++
  1 file changed, 7 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c 
b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index a9e940bd7e83..49a4d8e85bf8 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -840,9 +840,11 @@ static void override_lane_settings(const struct 
link_training_settings *lt_setti
 uint32_t lane;

 if (lt_settings->voltage_swing == NULL &&
-   lt_settings->pre_emphasis == NULL &&
-   lt_settings->ffe_preset == NULL &&
-   lt_settings->post_cursor2 == NULL)
+   lt_settings->pre_emphasis == NULL &&
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+   lt_settings->ffe_preset == NULL &&
+#endif
+   lt_settings->post_cursor2 == NULL)

 return;

@@ -853,9 +855,10 @@ static void override_lane_settings(const struct 
link_training_settings *lt_setti
 lane_settings[lane].PRE_EMPHASIS = 
*lt_settings->pre_emphasis;
 if (lt_settings->post_cursor2)
 lane_settings[lane].POST_CURSOR2 = 
*lt_settings->post_cursor2;
-
+#if defined(CONFIG_DRM_AMD_DC_DCN)
 if (lt_settings->ffe_preset)
 lane_settings[lane].FFE_PRESET = 
*lt_settings->ffe_preset;
+#endif
 }
  }

--
2.31.1





Re: [PATCH] drm/amd/display: Reduce dmesg error to a debug print

2021-11-12 Thread Kazlauskas, Nicholas

On 2021-11-12 10:56 a.m., Leo (Hanghong) Ma wrote:

[Why & How]
Dmesg errors are found on dcn3.1 during reset test, but it's not
really a failure. So reduce it to a debug print.

Signed-off-by: Leo (Hanghong) Ma 


This is expected to occur on displays that aren't connected/don't 
support LTTPR so this is fine.


Reviewed-by: Nicholas Kazlauskas 

Regards,
Nicholas Kazlauskas


---
  drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c | 2 +-
  1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c 
b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index cb7bf9148904..c7785e29b1c0 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -4454,7 +4454,7 @@ bool dp_retrieve_lttpr_cap(struct dc_link *link)
lttpr_dpcd_data,
sizeof(lttpr_dpcd_data));
if (status != DC_OK) {
-   dm_error("%s: Read LTTPR caps data failed.\n", 
__func__);
+   DC_LOG_DP2("%s: Read LTTPR caps data failed.\n", 
__func__);
return false;
}
  





Re: [PATCH v1] drm/amd/display: Add DP-HDMI PCON SST Support

2021-11-24 Thread Kazlauskas, Nicholas

On 2021-11-24 12:28 p.m., Fangzhi Zuo wrote:

1. Parse DSC caps from PCON DPCD
2. Determine policy if decoding DSC at PCON
3. Enable/disable DSC at PCON

Signed-off-by: Fangzhi Zuo 


Reviewed-by: Nicholas Kazlauskas 

Regards,
Nicholas Kazlauskas


---
  .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 41 +++
  .../amd/display/amdgpu_dm/amdgpu_dm_helpers.c | 13 +-
  2 files changed, 44 insertions(+), 10 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 9a1ac657faa2..9dbf6bf3f1c3 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -6047,10 +6047,12 @@ static void update_dsc_caps(struct amdgpu_dm_connector 
*aconnector,
  
  	if (aconnector->dc_link && (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT ||

sink->sink_signal == SIGNAL_TYPE_EDP)) {
-   dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
- 
aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
- 
aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
- dsc_caps);
+   if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE ||
+   sink->link->dpcd_caps.dongle_type == 
DISPLAY_DONGLE_DP_HDMI_CONVERTER)
+   dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
+   
aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
+   
aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
+   dsc_caps);
}
  }
  
@@ -6120,6 +6122,8 @@ static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,

uint32_t link_bandwidth_kbps;
uint32_t max_dsc_target_bpp_limit_override = 0;
struct dc *dc = sink->ctx->dc;
+   uint32_t max_supported_bw_in_kbps, timing_bw_in_kbps;
+   uint32_t dsc_max_supported_bw_in_kbps;
  
  	link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,


dc_link_get_link_cap(aconnector->dc_link));
@@ -6138,16 +6142,37 @@ static void apply_dsc_policy_for_stream(struct 
amdgpu_dm_connector *aconnector,
apply_dsc_policy_for_edp(aconnector, sink, stream, dsc_caps, 
max_dsc_target_bpp_limit_override);
  
  	} else if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {

-
-   if 
(dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
+   if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE) {
+   if 
(dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
dsc_caps,

aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,

max_dsc_target_bpp_limit_override,
link_bandwidth_kbps,
&stream->timing,
&stream->timing.dsc_cfg)) {
-   stream->timing.flags.DSC = 1;
-   DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from SST RX\n", 
__func__, drm_connector->name);
+   stream->timing.flags.DSC = 1;
+   DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from SST 
RX\n",
+__func__, 
drm_connector->name);
+   }
+   } else if (sink->link->dpcd_caps.dongle_type == 
DISPLAY_DONGLE_DP_HDMI_CONVERTER) {
+   timing_bw_in_kbps = 
dc_bandwidth_in_kbps_from_timing(&stream->timing);
+   max_supported_bw_in_kbps = link_bandwidth_kbps;
+   dsc_max_supported_bw_in_kbps = link_bandwidth_kbps;
+
+   if (timing_bw_in_kbps > max_supported_bw_in_kbps &&
+   max_supported_bw_in_kbps > 0 &&
+   dsc_max_supported_bw_in_kbps > 0)
+   if 
(dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
+   dsc_caps,
+   
aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
+   
max_dsc_target_bpp_limit_override,
+   dsc_max_supported_bw_in_kbps,
+   &stream->timing,
+   &stream->timing.dsc_cfg)) {
+   stream->timi

Re: [PATCH v2] drm/amd/display: Add DP-HDMI FRL PCON Support in DC

2021-11-26 Thread Kazlauskas, Nicholas

On 2021-11-26 9:32 a.m., Fangzhi Zuo wrote:

Change since v1: add brief description
1. Add hdmi frl pcon support to existing asic family.
2. Determine pcon frl capability based on pcon dpcd.
3. PCON FRL is taken into consideration in mode validation.

Signed-off-by: Fangzhi Zuo 


Reviewed-by: Nicholas Kazlauskas 

I think we probably should be using the DP DPCD defines directly instead 
of our own unions, though. Maybe as a cleanup later.


Regards,
Nicholas Kazlauskas


---
  drivers/gpu/drm/amd/display/dc/core/dc_link.c | 15 
  .../gpu/drm/amd/display/dc/core/dc_link_dp.c  | 71 +++
  drivers/gpu/drm/amd/display/dc/dc.h   |  6 ++
  drivers/gpu/drm/amd/display/dc/dc_dp_types.h  | 31 
  drivers/gpu/drm/amd/display/dc/dc_hw_types.h  |  3 +
  drivers/gpu/drm/amd/display/dc/dc_link.h  |  1 +
  drivers/gpu/drm/amd/display/dc/dc_types.h |  1 +
  .../drm/amd/display/dc/dcn20/dcn20_resource.c |  2 +
  .../drm/amd/display/dc/dcn21/dcn21_resource.c |  2 +
  .../drm/amd/display/dc/dcn30/dcn30_resource.c |  2 +
  .../drm/amd/display/dc/dcn31/dcn31_resource.c |  1 +
  11 files changed, 135 insertions(+)

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c 
b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index 3d08f8eba402..dad7a4fdc427 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -2750,8 +2750,23 @@ static bool dp_active_dongle_validate_timing(
return false;
}
  
+#if defined(CONFIG_DRM_AMD_DC_DCN)

+   if (dongle_caps->dp_hdmi_frl_max_link_bw_in_kbps > 0) { // DP to HDMI 
FRL converter
+   struct dc_crtc_timing outputTiming = *timing;
+
+   if (timing->flags.DSC && !timing->dsc_cfg.is_frl)
+   /* DP input has DSC, HDMI FRL output doesn't have DSC, 
remove DSC from output timing */
+   outputTiming.flags.DSC = 0;
+   if (dc_bandwidth_in_kbps_from_timing(&outputTiming) > 
dongle_caps->dp_hdmi_frl_max_link_bw_in_kbps)
+   return false;
+   } else { // DP to HDMI TMDS converter
+   if (get_timing_pixel_clock_100hz(timing) > 
(dongle_caps->dp_hdmi_max_pixel_clk_in_khz * 10))
+   return false;
+   }
+#else
if (get_timing_pixel_clock_100hz(timing) > 
(dongle_caps->dp_hdmi_max_pixel_clk_in_khz * 10))
return false;
+#endif
  
  #if defined(CONFIG_DRM_AMD_DC_DCN)

}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c 
b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index 84f3545c3032..da1532356c07 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -4313,6 +4313,56 @@ static int translate_dpcd_max_bpc(enum 
dpcd_downstream_port_max_bpc bpc)
return -1;
  }
  
+#if defined(CONFIG_DRM_AMD_DC_DCN)

+uint32_t dc_link_bw_kbps_from_raw_frl_link_rate_data(uint8_t bw)
+{
+   switch (bw) {
+   case 0b001:
+   return 900;
+   case 0b010:
+   return 1800;
+   case 0b011:
+   return 2400;
+   case 0b100:
+   return 3200;
+   case 0b101:
+   return 4000;
+   case 0b110:
+   return 4800;
+   }
+
+   return 0;
+}
+
+/**
+ * Return PCON's post FRL link training supported BW if its non-zero, 
otherwise return max_supported_frl_bw.
+ */
+static uint32_t intersect_frl_link_bw_support(
+   const uint32_t max_supported_frl_bw_in_kbps,
+   const union hdmi_encoded_link_bw hdmi_encoded_link_bw)
+{
+   uint32_t supported_bw_in_kbps = max_supported_frl_bw_in_kbps;
+
+   // HDMI_ENCODED_LINK_BW bits are only valid if HDMI Link Configuration 
bit is 1 (FRL mode)
+   if (hdmi_encoded_link_bw.bits.FRL_MODE) {
+   if (hdmi_encoded_link_bw.bits.BW_48Gbps)
+   supported_bw_in_kbps = 4800;
+   else if (hdmi_encoded_link_bw.bits.BW_40Gbps)
+   supported_bw_in_kbps = 4000;
+   else if (hdmi_encoded_link_bw.bits.BW_32Gbps)
+   supported_bw_in_kbps = 3200;
+   else if (hdmi_encoded_link_bw.bits.BW_24Gbps)
+   supported_bw_in_kbps = 2400;
+   else if (hdmi_encoded_link_bw.bits.BW_18Gbps)
+   supported_bw_in_kbps = 1800;
+   else if (hdmi_encoded_link_bw.bits.BW_9Gbps)
+   supported_bw_in_kbps = 900;
+   }
+
+   return supported_bw_in_kbps;
+}
+#endif
+
  static void read_dp_device_vendor_id(struct dc_link *link)
  {
struct dp_device_vendor_id dp_id;
@@ -4424,6 +4474,27 @@ static void get_active_converter_info(
translate_dpcd_max_bpc(

hdmi_color_caps.bits.MAX_BITS_PER_COLOR_COMPONENT);
  

Re: [PATCH v2] drm/amd/display: Use oriented source size when checking cursor scaling

2021-12-02 Thread Kazlauskas, Nicholas

On 2021-12-02 7:52 a.m., Vlad Zahorodnii wrote:

dm_check_crtc_cursor() doesn't take into account plane transforms when
calculating plane scaling, this can result in false positives.

For example, if there's an output with resolution 3840x2160 and the
output is rotated 90 degrees, CRTC_W and CRTC_H will be 3840 and 2160,
respectively, but SRC_W and SRC_H will be 2160 and 3840, respectively.

Since the cursor plane usually has a square buffer attached to it, the
dm_check_crtc_cursor() will think that there's a scale factor mismatch
even though there isn't really.

This fixes an issue where kwin fails to use hardware plane transforms.

Changes since version 1:
- s/orientated/oriented/g

Signed-off-by: Vlad Zahorodnii 


This looks correct to me. I guess it's also not modifying the actual 
programming position, just the check to ensure that the cursor is going 
to be unscaled in the correct orientation.


Would be good to have some IGT tests for these scaled cases to verify 
atomic check pass/fail assumptions, but for now:


Reviewed-by: Nicholas Kazlauskas 

Regards,
Nicholas Kazlauskas


---
  .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 35 ++-
  1 file changed, 27 insertions(+), 8 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index a3c0f2e4f4c1..c009c668fbe2 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -10736,6 +10736,24 @@ static int dm_update_plane_state(struct dc *dc,
return ret;
  }
  
+static void dm_get_oriented_plane_size(struct drm_plane_state *plane_state,

+  int *src_w, int *src_h)
+{
+   switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
+   case DRM_MODE_ROTATE_90:
+   case DRM_MODE_ROTATE_270:
+   *src_w = plane_state->src_h >> 16;
+   *src_h = plane_state->src_w >> 16;
+   break;
+   case DRM_MODE_ROTATE_0:
+   case DRM_MODE_ROTATE_180:
+   default:
+   *src_w = plane_state->src_w >> 16;
+   *src_h = plane_state->src_h >> 16;
+   break;
+   }
+}
+
  static int dm_check_crtc_cursor(struct drm_atomic_state *state,
struct drm_crtc *crtc,
struct drm_crtc_state *new_crtc_state)
@@ -10744,6 +10762,8 @@ static int dm_check_crtc_cursor(struct drm_atomic_state 
*state,
struct drm_plane_state *new_cursor_state, *new_underlying_state;
int i;
int cursor_scale_w, cursor_scale_h, underlying_scale_w, 
underlying_scale_h;
+   int cursor_src_w, cursor_src_h;
+   int underlying_src_w, underlying_src_h;
  
  	/* On DCE and DCN there is no dedicated hardware cursor plane. We get a

 * cursor per pipe but it's going to inherit the scaling and
@@ -10755,10 +10775,9 @@ static int dm_check_crtc_cursor(struct 
drm_atomic_state *state,
return 0;
}
  
-	cursor_scale_w = new_cursor_state->crtc_w * 1000 /

-(new_cursor_state->src_w >> 16);
-   cursor_scale_h = new_cursor_state->crtc_h * 1000 /
-(new_cursor_state->src_h >> 16);
+   dm_get_oriented_plane_size(new_cursor_state, &cursor_src_w, 
&cursor_src_h);
+   cursor_scale_w = new_cursor_state->crtc_w * 1000 / cursor_src_w;
+   cursor_scale_h = new_cursor_state->crtc_h * 1000 / cursor_src_h;
  
  	for_each_new_plane_in_state_reverse(state, underlying, new_underlying_state, i) {

/* Narrow down to non-cursor planes on the same CRTC as the 
cursor */
@@ -10769,10 +10788,10 @@ static int dm_check_crtc_cursor(struct 
drm_atomic_state *state,
if (!new_underlying_state->fb)
continue;
  
-		underlying_scale_w = new_underlying_state->crtc_w * 1000 /

-(new_underlying_state->src_w >> 16);
-   underlying_scale_h = new_underlying_state->crtc_h * 1000 /
-(new_underlying_state->src_h >> 16);
+   dm_get_oriented_plane_size(new_underlying_state,
+  &underlying_src_w, 
&underlying_src_h);
+   underlying_scale_w = new_underlying_state->crtc_w * 1000 / 
underlying_src_w;
+   underlying_scale_h = new_underlying_state->crtc_h * 1000 / 
underlying_src_h;
  
  		if (cursor_scale_w != underlying_scale_w ||

cursor_scale_h != underlying_scale_h) {





Re: [PATCH v1] drm/amd/display: Add Debugfs Entry to Force in SST Sequence

2021-12-07 Thread Kazlauskas, Nicholas

On 2021-12-07 1:55 p.m., Fangzhi Zuo wrote:

It is a w/a to check DP2 SST behavior on the M42D box.


Isn't this useful beyond just the m42d/dp2?

This should affect regular DP MST support as well. Adding this debug 
flag is okay, but I think the names should be updated (inline).




Signed-off-by: Fangzhi Zuo 
---
  .../amd/display/amdgpu_dm/amdgpu_dm_debugfs.c | 27 +++
  1 file changed, 27 insertions(+)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
index 31c05eb5c64a..9590c0acba1f 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
@@ -3237,6 +3237,30 @@ static int disable_hpd_get(void *data, u64 *val)
  DEFINE_DEBUGFS_ATTRIBUTE(disable_hpd_ops, disable_hpd_get,
 disable_hpd_set, "%llu\n");
  
+/*

+ * w/a to force in SST mode for M42D DP2 receiver.
+ * Example usage: echo 1 > /sys/kernel/debug/dri/0/amdgpu_dm_dp2_force_sst
+ */
+static int dp2_force_sst_set(void *data, u64 val)
+{
+   struct amdgpu_device *adev = data;
+
+   adev->dm.dc->debug.set_mst_en_for_sst = val;
+
+   return 0;
+}
+
+static int dp2_force_sst_get(void *data, u64 *val)
+{
+   struct amdgpu_device *adev = data;
+
+   *val = adev->dm.dc->debug.set_mst_en_for_sst;
+
+   return 0;
+}
+DEFINE_DEBUGFS_ATTRIBUTE(dp2_force_sst_ops, dp2_force_sst_get,
+dp2_force_sst_set, "%llu\n");
+
  /*
   * Sets the DC visual confirm debug option from the given string.
   * Example usage: echo 1 > /sys/kernel/debug/dri/0/amdgpu_visual_confirm
@@ -3371,4 +3395,7 @@ void dtn_debugfs_init(struct amdgpu_device *adev)
debugfs_create_file_unsafe("amdgpu_dm_disable_hpd", 0644, root, adev,
   &disable_hpd_ops);
  
+	debugfs_create_file_unsafe("amdgpu_dm_dp2_force_sst", 0644, root, adev,

+   &dp2_force_sst_ops);


"amdgpu_dm_dp_set_mst_en_for_sst"

...might be a better name.

Regards,
Nicholas Kazlauskas


+
  }





Re: [PATCH] drm/amdgpu: add DMUB outbox event IRQ source define/complete/debug flag

2021-04-06 Thread Kazlauskas, Nicholas

On 2021-03-31 11:21 p.m., Jude Shih wrote:

[Why & How]
We use outbox interrupt that allows us to do the AUX via DMUB
Therefore, we need to add some irq source related definition
in the header files;
Also, I added debug flag that allows us to turn it on/off
for testing purpose.


Missing your signed-off-by here, please recommit with

git commit --amend --sign


---
  drivers/gpu/drm/amd/amdgpu/amdgpu.h   | 2 ++
  drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c   | 2 +-
  drivers/gpu/drm/amd/include/amd_shared.h  | 3 ++-
  drivers/gpu/drm/amd/include/ivsrcid/dcn/irqsrcs_dcn_1_0.h | 2 ++
  4 files changed, 7 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 963ecfd84347..479c8a28a3a9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -923,6 +923,7 @@ struct amdgpu_device {
struct amdgpu_irq_src   pageflip_irq;
struct amdgpu_irq_src   hpd_irq;
struct amdgpu_irq_src   dmub_trace_irq;
+   struct amdgpu_irq_src   outbox_irq;
  
  	/* rings */

u64 fence_context;
@@ -1077,6 +1078,7 @@ struct amdgpu_device {
  
  	boolin_pci_err_recovery;

struct pci_saved_state  *pci_state;
+   struct completion dmub_aux_transfer_done;
  };
  
  static inline struct amdgpu_device *drm_to_adev(struct drm_device *ddev)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 6a06234dbcad..0b88e13f5a7b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -159,7 +159,7 @@ int amdgpu_smu_pptable_id = -1;
   * PSR (bit 3) disabled by default
   */
  uint amdgpu_dc_feature_mask = 2;
-uint amdgpu_dc_debug_mask;
+uint amdgpu_dc_debug_mask = 0x10;


If this is intended to be enabled by default then it shouldn't be a 
debug flag. Please either leave the default alone or fully switch over 
to DMCUB AUX support for ASICs that support it.


If you don't already have a check from the driver to the DMCUB firmware to 
ensure that the firmware itself supports it, you'd need that as well - 
users can be running older firmware (like the firmware that originally 
shipped with DCN2.1/DCN3.0 support) that wouldn't support this feature.


My recommendation:
- Add a command to check for DMUB AUX capability or add bits to the 
metadata to indicate that the firmware does support it
- Assume that the DMUB AUX implementation is solid and a complete 
replacement for existing AUX support on firmware that does support it
- Add a debug flag like DC_DISABLE_DMUB_AUX for optionally debugging 
issues if they arise
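
A rough sketch of how that gating could fit together is below. This is 
illustrative only: dm_dmub_outbox_aux_usable(), the dmub_fw_caps.dmub_aux bit 
and dc->debug.dmub_aux_support are hypothetical placeholder names, not existing 
DMUB or DC interfaces; only amdgpu_dc_debug_mask and the proposed 
DC_DISABLE_DMUB_AUX flag come from this thread.

static bool dm_dmub_outbox_aux_usable(struct amdgpu_device *adev)
{
	/* Older firmware never reports the cap, so it keeps using legacy AUX. */
	if (!adev->dm.dmub_fw_caps.dmub_aux)		/* hypothetical cap bit */
		return false;

	/* Per-ASIC default picked in the DCN resource code. */
	if (!adev->dm.dc->debug.dmub_aux_support)	/* hypothetical debug field */
		return false;

	/* User escape hatch for debugging regressions. */
	if (amdgpu_dc_debug_mask & DC_DISABLE_DMUB_AUX)
		return false;

	return true;
}

With something like this, DMCUB AUX can default on for supported ASIC/firmware 
combinations while staying off everywhere else.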



  int amdgpu_async_gfx_ring = 1;
  int amdgpu_mcbp;
  int amdgpu_discovery = -1;
diff --git a/drivers/gpu/drm/amd/include/amd_shared.h 
b/drivers/gpu/drm/amd/include/amd_shared.h
index 43ed6291b2b8..097672cc78a1 100644
--- a/drivers/gpu/drm/amd/include/amd_shared.h
+++ b/drivers/gpu/drm/amd/include/amd_shared.h
@@ -227,7 +227,8 @@ enum DC_DEBUG_MASK {
DC_DISABLE_PIPE_SPLIT = 0x1,
DC_DISABLE_STUTTER = 0x2,
DC_DISABLE_DSC = 0x4,
-   DC_DISABLE_CLOCK_GATING = 0x8
+   DC_DISABLE_CLOCK_GATING = 0x8,
+   DC_ENABLE_DMUB_AUX = 0x10,
  };
  
  enum amd_dpm_forced_level;

diff --git a/drivers/gpu/drm/amd/include/ivsrcid/dcn/irqsrcs_dcn_1_0.h 
b/drivers/gpu/drm/amd/include/ivsrcid/dcn/irqsrcs_dcn_1_0.h
index e2bffcae273a..754170a86ea4 100644
--- a/drivers/gpu/drm/amd/include/ivsrcid/dcn/irqsrcs_dcn_1_0.h
+++ b/drivers/gpu/drm/amd/include/ivsrcid/dcn/irqsrcs_dcn_1_0.h
@@ -1132,5 +1132,7 @@
  
  #define DCN_1_0__SRCID__DMCUB_OUTBOX_HIGH_PRIORITY_READY_INT   0x68

  #define DCN_1_0__CTXID__DMCUB_OUTBOX_HIGH_PRIORITY_READY_INT   6
+#define DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT	0x68 // DMCUB_IHC_outbox1_ready_int IHC_DMCUB_outbox1_ready_int_ack DMCUB_OUTBOX_LOW_PRIORITY_READY_INTERRUPT DISP_INTERRUPT_STATUS_CONTINUE24 Level/Pulse
+#define DCN_1_0__CTXID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT	8


This technically isn't on DCN_1_0 but I guess we've been using this file 
for all the DCNs.


I do wish this was labeled DCN_2_1 instead to make it more explicit but 
I guess this is fine for now.


Regards,
Nicholas Kazlauskas

  
  #endif // __IRQSRCS_DCN_1_0_H__






Re: [PATCH] drm/amdgpu: add DMUB outbox event IRQ source define/complete/debug flag

2021-04-06 Thread Kazlauskas, Nicholas

On 2021-04-06 9:40 a.m., Jude Shih wrote:

[Why & How]
We use outbox interrupt that allows us to do the AUX via DMUB
Therefore, we need to add some irq source related definition
in the header files;
Also, I added debug flag that allows us to turn it on/off
for testing purpose.

Signed-off-by: Jude Shih 
---
  drivers/gpu/drm/amd/amdgpu/amdgpu.h   | 2 ++
  drivers/gpu/drm/amd/include/amd_shared.h  | 3 ++-
  drivers/gpu/drm/amd/include/ivsrcid/dcn/irqsrcs_dcn_1_0.h | 2 ++
  3 files changed, 6 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 963ecfd84347..7e64fc5e0dcd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -923,6 +923,7 @@ struct amdgpu_device {
struct amdgpu_irq_src   pageflip_irq;
struct amdgpu_irq_src   hpd_irq;
struct amdgpu_irq_src   dmub_trace_irq;
+   struct amdgpu_irq_src   dmub_outbox_irq;
  
  	/* rings */

u64 fence_context;
@@ -1077,6 +1078,7 @@ struct amdgpu_device {
  
  	boolin_pci_err_recovery;

struct pci_saved_state  *pci_state;
+   struct completion dmub_aux_transfer_done;


Does this completion need to be on the amdgpu device itself?

I would prefer if we keep this as needed within DM itself if possible.
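
For illustration only, the completion could live in the DM-private structure 
instead, along these lines (other members elided; the field name simply mirrors 
the patch):

struct amdgpu_display_manager {
	/* ... existing members ... */

	/*
	 * Signalled from the DMUB outbox IRQ handler once the firmware has
	 * finished processing an AUX transfer request.
	 */
	struct completion dmub_aux_transfer_done;
};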


  };
  
  static inline struct amdgpu_device *drm_to_adev(struct drm_device *ddev)

diff --git a/drivers/gpu/drm/amd/include/amd_shared.h 
b/drivers/gpu/drm/amd/include/amd_shared.h
index 43ed6291b2b8..097672cc78a1 100644
--- a/drivers/gpu/drm/amd/include/amd_shared.h
+++ b/drivers/gpu/drm/amd/include/amd_shared.h
@@ -227,7 +227,8 @@ enum DC_DEBUG_MASK {
DC_DISABLE_PIPE_SPLIT = 0x1,
DC_DISABLE_STUTTER = 0x2,
DC_DISABLE_DSC = 0x4,
-   DC_DISABLE_CLOCK_GATING = 0x8
+   DC_DISABLE_CLOCK_GATING = 0x8,
+   DC_ENABLE_DMUB_AUX = 0x10,


My problem with still leaving this as DC_ENABLE_DMUB_AUX is we shouldn't 
require the user to have to flip this on by default later. I think I'd 
prefer this still as a DISABLE option if we want to leave it for users 
to debug any potential issues.


If there's no value in having end users debug issues by setting this bit 
then we should keep it as a dc->debug default in DCN resource.


Regards,
Nicholas Kazlauskas


  };
  
  enum amd_dpm_forced_level;

diff --git a/drivers/gpu/drm/amd/include/ivsrcid/dcn/irqsrcs_dcn_1_0.h 
b/drivers/gpu/drm/amd/include/ivsrcid/dcn/irqsrcs_dcn_1_0.h
index e2bffcae273a..754170a86ea4 100644
--- a/drivers/gpu/drm/amd/include/ivsrcid/dcn/irqsrcs_dcn_1_0.h
+++ b/drivers/gpu/drm/amd/include/ivsrcid/dcn/irqsrcs_dcn_1_0.h
@@ -1132,5 +1132,7 @@
  
  #define DCN_1_0__SRCID__DMCUB_OUTBOX_HIGH_PRIORITY_READY_INT   0x68

  #define DCN_1_0__CTXID__DMCUB_OUTBOX_HIGH_PRIORITY_READY_INT   6
+#define DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT	0x68 // DMCUB_IHC_outbox1_ready_int IHC_DMCUB_outbox1_ready_int_ack DMCUB_OUTBOX_LOW_PRIORITY_READY_INTERRUPT DISP_INTERRUPT_STATUS_CONTINUE24 Level/Pulse
+#define DCN_1_0__CTXID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT	8
  
  #endif // __IRQSRCS_DCN_1_0_H__






Re: [PATCH] drm/amdgpu: add DMUB outbox event IRQ source define/complete/debug flag

2021-04-06 Thread Kazlauskas, Nicholas

On 2021-04-06 10:22 a.m., Shih, Jude wrote:

[AMD Official Use Only - Internal Distribution Only]

Hi Nicholas,

Does this completion need to be on the amdgpu device itself?

I would prefer if we keep this as needed within DM itself if possible.

=> do you mean move it to amdgpu_display_manager in amdgpu_dm.h as global 
variable?


There's a amdgpu_display_manager per device, but yes, it should be 
contained in there if possible since it's display code.




My problem with still leaving this as DC_ENABLE_DMUB_AUX is we shouldn't 
require the user to have to flip this on by default later. I think I'd prefer 
this still as a DISABLE option if we want to leave it for users to debug any 
potential issues.
=> do you mean DC_ENABLE_DMUB_AUX = 0x10 => DC_DISABLE_DMUB_AUX = 0x10
and amdgpu_dc_debug_mask = 0x10 as default to turn it off?


Don't modify the default debug mask and leave it alone. We can still 
have DC_DISABLE_DMUB_AUX = 0x10 as a user debug option if they have 
firmware that supports this.


Flag or not, we need a mechanism from driver to firmware to query 
whether the firmware supports it in the first place. It's not sufficient 
to fully control this feature with just a debug flag, there needs to be 
a cap check regardless with the firmware for support.


Older firmware won't implement this check and therefore won't enable the 
feature.


Newer (or test) firmware could enable this feature and report back to 
driver that it does support it.


The driver can then decide to enable this based on 
dc->debug.dmub_aux_support or something similar - it can be false for 
ASICs that we won't be supporting this on, and for ASICs that we do 
support we can leave it off by default until it's production ready.


For developer testing we can hardcode the flag to true. I think the DC 
debug flags here in the AMDGPU base driver only have value if we want 
general end users or validation to use them to debug potential issues.


Regards,
Nicholas Kazlauskas



Thanks,

Best Regards,

Jude

-Original Message-
From: Kazlauskas, Nicholas 
Sent: Tuesday, April 6, 2021 10:04 PM
To: Shih, Jude ; amd-gfx@lists.freedesktop.org
Cc: Deucher, Alexander ; Lin, Wayne ; 
Hung, Cruise 
Subject: Re: [PATCH] drm/amdgpu: add DMUB outbox event IRQ source 
define/complete/debug flag

On 2021-04-06 9:40 a.m., Jude Shih wrote:

[Why & How]
We use outbox interrupt that allows us to do the AUX via DMUB
Therefore, we need to add some irq source related definition in the
header files; Also, I added debug flag that allows us to turn it
on/off for testing purpose.

Signed-off-by: Jude Shih 
---
   drivers/gpu/drm/amd/amdgpu/amdgpu.h   | 2 ++
   drivers/gpu/drm/amd/include/amd_shared.h  | 3 ++-
   drivers/gpu/drm/amd/include/ivsrcid/dcn/irqsrcs_dcn_1_0.h | 2 ++
   3 files changed, 6 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 963ecfd84347..7e64fc5e0dcd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -923,6 +923,7 @@ struct amdgpu_device {
struct amdgpu_irq_src   pageflip_irq;
struct amdgpu_irq_src   hpd_irq;
struct amdgpu_irq_src   dmub_trace_irq;
+   struct amdgpu_irq_src   dmub_outbox_irq;
   
   	/* rings */

u64 fence_context;
@@ -1077,6 +1078,7 @@ struct amdgpu_device {
   
   	boolin_pci_err_recovery;

struct pci_saved_state  *pci_state;
+   struct completion dmub_aux_transfer_done;


Does this completion need to be on the amdgpu device itself?

I would prefer if we keep this as needed within DM itself if possible.


   };
   
   static inline struct amdgpu_device *drm_to_adev(struct drm_device

*ddev) diff --git a/drivers/gpu/drm/amd/include/amd_shared.h
b/drivers/gpu/drm/amd/include/amd_shared.h
index 43ed6291b2b8..097672cc78a1 100644
--- a/drivers/gpu/drm/amd/include/amd_shared.h
+++ b/drivers/gpu/drm/amd/include/amd_shared.h
@@ -227,7 +227,8 @@ enum DC_DEBUG_MASK {
DC_DISABLE_PIPE_SPLIT = 0x1,
DC_DISABLE_STUTTER = 0x2,
DC_DISABLE_DSC = 0x4,
-   DC_DISABLE_CLOCK_GATING = 0x8
+   DC_DISABLE_CLOCK_GATING = 0x8,
+   DC_ENABLE_DMUB_AUX = 0x10,


My problem with still leaving this as DC_ENABLE_DMUB_AUX is we shouldn't 
require the user to have to flip this on by default later. I think I'd prefer 
this still as a DISABLE option if we want to leave it for users to debug any 
potential issues.

If there's no value in having end users debug issues by setting this bit then we 
should keep it as a dc->debug default in DCN resource.

Regards,
Nicholas Kazlauskas


   };
   
   enum amd_dpm_forced_level;

diff --git a/drivers/gpu/drm/amd/include/ivsrcid/dcn/irqsrcs_dcn_1_0.h
b/drivers/gpu/drm/amd/include/ivsrcid/dcn/irqsrcs_dcn_1_0.h
i

Re: [PATCH] drm/amd/display: Reject non-zero src_y and src_x for video planes

2021-04-22 Thread Kazlauskas, Nicholas

On 2021-04-22 7:20 p.m., Harry Wentland wrote:

[Why]
This hasn't been well tested and leads to complete system hangs on DCN1
based systems, possibly others.

The system hang can be reproduced by gesturing the video on the YouTube
Android app on ChromeOS into full screen.

[How]
Reject atomic commits with non-zero drm_plane_state.src_x or src_y values.

Signed-off-by: Harry Wentland 
Change-Id: I5e951f95fc87c86517b9ea6e094d73603184f00b


Drop the Change-ID on submit.


---
  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 7 +++
  1 file changed, 7 insertions(+)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 4b3b9599aaf7..99fd555ebb91 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -2825,6 +2825,13 @@ static int fill_dc_scaling_info(const struct 
drm_plane_state *state,
scaling_info->src_rect.x = state->src_x >> 16;
scaling_info->src_rect.y = state->src_y >> 16;
  
+

+   if (state->fb &&
+   state->fb->format->format == DRM_FORMAT_NV12 &&
+   (scaling_info->src_rect.x != 0 ||
+scaling_info->src_rect.y != 0))
+   return -EINVAL;
+


I'd like to see a comment in the source code similar to what's 
explained in the commit message, so that people skimming through the code 
understand some of the background on this.
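
Something along these lines would capture that background right at the 
rejection site (comment wording is only a suggestion; the check itself is the 
one from this patch):

	/*
	 * Non-zero src_x/src_y offsets into NV12 (video) surfaces have not
	 * been well tested and are known to hard hang at least DCN1-based
	 * systems (e.g. fullscreening the YouTube Android app on ChromeOS),
	 * so reject such commits until the offset path is validated.
	 */
	if (state->fb &&
	    state->fb->format->format == DRM_FORMAT_NV12 &&
	    (scaling_info->src_rect.x != 0 || scaling_info->src_rect.y != 0))
		return -EINVAL;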


I'd also like to know if this is generic across all DCN or specific to 
DCN1. For now at least we can disable it generically I think.


With the commit message updated and source commented this patch is:

Reviewed-by: Nicholas Kazlauskas 


scaling_info->src_rect.width = state->src_w >> 16;
if (scaling_info->src_rect.width == 0)
return -EINVAL;





Re: [PATCH] drm/amd/display: Make underlay rules less strict

2021-05-07 Thread Kazlauskas, Nicholas

On 2021-05-07 10:37 a.m., Rodrigo Siqueira wrote:

Currently, we reject all conditions where the underlay plane goes
outside the overlay plane limits, which is not entirely correct since we
reject some valid cases like the ones illustrated below:

   ++  ++
   |   Overlay plane|  |   Overlay plane|
   ||  |+---|--+
   | +--+   |  ||   |  |
   | |  |   |  ||   |  |
   ++  ++  |
 | Primary plane|   +--+
 |  (underlay)  |
 +--+
   +-+--+---+  ++
   |Overlay plane   |  |Overlay plane   |
+-|+   |  |   +--+
| ||   |  |   || |
| ||   |  |   || |
| ||   |  |   || |
+-|+   |  |   +--+
   ++  ++

This patch fixes this issue by only rejecting commit requests where the
underlay is entirely outside the overlay limits. After applying this
patch, a set of subtests related to kms_plane, kms_plane_alpha_blend,
and kms_plane_scaling will pass.

Signed-off-by: Rodrigo Siqueira 


What's the size of the overlay plane in your examples? If the overlay 
plane does not cover the entire screen then this patch is incorrect.


We don't want to be enabling the cursor on multiple pipes, and the checks 
in DC that allow disabling the cursor on bottom pipes only work if the 
underlay is entirely contained within the overlay.
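
To make the distinction concrete, here is a minimal sketch (standalone helpers, 
not DC code) of the two predicates; the patch effectively relaxes the check 
from the second to the first, while the cursor handling relies on the second:

/* Overlap: the two plane rectangles merely intersect somewhere. */
static bool plane_rects_overlap(const struct drm_plane_state *a,
				const struct drm_plane_state *b)
{
	return a->crtc_x < b->crtc_x + (int)b->crtc_w &&
	       a->crtc_x + (int)a->crtc_w > b->crtc_x &&
	       a->crtc_y < b->crtc_y + (int)b->crtc_h &&
	       a->crtc_y + (int)a->crtc_h > b->crtc_y;
}

/* Containment: the primary/underlay lies entirely inside the overlay,
 * which is the precondition for disabling the cursor on bottom pipes. */
static bool plane_rect_contained(const struct drm_plane_state *inner,
				 const struct drm_plane_state *outer)
{
	return inner->crtc_x >= outer->crtc_x &&
	       inner->crtc_y >= outer->crtc_y &&
	       inner->crtc_x + (int)inner->crtc_w <= outer->crtc_x + (int)outer->crtc_w &&
	       inner->crtc_y + (int)inner->crtc_h <= outer->crtc_y + (int)outer->crtc_h;
}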


In the case where the primary (underlay) plane extends beyond the screen 
boundaries it should be preclipped by userspace or earlier in the DM 
code before this check.


Feel free to follow up with clarification, but for now this patch is a 
NAK from me.


Regards,
Nicholas Kazlauskas


---
  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 8 
  1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index cc048c348a92..15006aafc630 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -10098,10 +10098,10 @@ static int validate_overlay(struct drm_atomic_state *state)
 		return 0;
 
 	/* Perform the bounds check to ensure the overlay plane covers the primary */
-	if (primary_state->crtc_x < overlay_state->crtc_x ||
-	    primary_state->crtc_y < overlay_state->crtc_y ||
-	    primary_state->crtc_x + primary_state->crtc_w > overlay_state->crtc_x + overlay_state->crtc_w ||
-	    primary_state->crtc_y + primary_state->crtc_h > overlay_state->crtc_y + overlay_state->crtc_h) {
+	if (primary_state->crtc_x + primary_state->crtc_w < overlay_state->crtc_x ||
+	    primary_state->crtc_x > overlay_state->crtc_x + overlay_state->crtc_w ||
+	    primary_state->crtc_y > overlay_state->crtc_y + overlay_state->crtc_h ||
+	    primary_state->crtc_y + primary_state->crtc_h < overlay_state->crtc_y) {
 		DRM_DEBUG_ATOMIC("Overlay plane is enabled with hardware cursor but does not fully cover primary plane\n");
 		return -EINVAL;
 	}





Re: [PATCH] drm/amd/display: Move plane code from amdgpu_dm to amdgpu_dm_plane

2021-05-07 Thread Kazlauskas, Nicholas

On 2021-05-07 10:39 a.m., Rodrigo Siqueira wrote:

The amdgpu_dm file contains most of the code that works as an interface
between DRM API and Display Core. We maintain all the plane operations
inside amdgpu_dm; this commit extracts the plane code to its specific
file named amdgpu_dm_plane. This commit does not introduce any
functional change to the functions; it only changes some static
functions to global and adds some minor adjustments related to the copy
from one place to another.

Signed-off-by: Rodrigo Siqueira 
---
  .../gpu/drm/amd/display/amdgpu_dm/Makefile|9 +-
  .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 1479 +---
  .../amd/display/amdgpu_dm/amdgpu_dm_plane.c   | 1496 +
  .../amd/display/amdgpu_dm/amdgpu_dm_plane.h   |   56 +
  4 files changed, 1559 insertions(+), 1481 deletions(-)
  create mode 100644 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
  create mode 100644 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.h

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile 
b/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile
index 9a3b7bf8ab0b..6542ef0ff83e 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile
@@ -23,9 +23,12 @@
  # Makefile for the 'dm' sub-component of DAL.
  # It provides the control and status of dm blocks.
  
-

-
-AMDGPUDM = amdgpu_dm.o amdgpu_dm_irq.o amdgpu_dm_mst_types.o amdgpu_dm_color.o
+AMDGPUDM := \
+   amdgpu_dm.o \
+   amdgpu_dm_color.o \
+   amdgpu_dm_irq.o \
+   amdgpu_dm_mst_types.o \
+   amdgpu_dm_plane.o
  
  ifneq ($(CONFIG_DRM_AMD_DC),)

  AMDGPUDM += amdgpu_dm_services.o amdgpu_dm_helpers.o amdgpu_dm_pp_smu.o
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index cc048c348a92..60ddb4d8be6c 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -44,6 +44,7 @@
  #include "amdgpu_ucode.h"
  #include "atom.h"
  #include "amdgpu_dm.h"
+#include "amdgpu_dm_plane.h"
  #ifdef CONFIG_DRM_AMD_DC_HDCP
  #include "amdgpu_dm_hdcp.h"
  #include 
@@ -181,10 +182,6 @@ static int amdgpu_dm_initialize_drm_device(struct 
amdgpu_device *adev);
  /* removes and deallocates the drm structures, created by the above function 
*/
  static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
  
-static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,

-   struct drm_plane *plane,
-   unsigned long possible_crtcs,
-   const struct dc_plane_cap *plane_cap);
  static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
   struct drm_plane *plane,
   uint32_t link_index);
@@ -203,9 +200,6 @@ static void amdgpu_dm_atomic_commit_tail(struct 
drm_atomic_state *state);
  static int amdgpu_dm_atomic_check(struct drm_device *dev,
  struct drm_atomic_state *state);
  
-static void handle_cursor_update(struct drm_plane *plane,

-struct drm_plane_state *old_plane_state);
-
  static void amdgpu_dm_set_psr_caps(struct dc_link *link);
  static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
  static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
@@ -4125,925 +4119,12 @@ static const struct drm_encoder_funcs 
amdgpu_dm_encoder_funcs = {
.destroy = amdgpu_dm_encoder_destroy,
  };
  
-

-static void get_min_max_dc_plane_scaling(struct drm_device *dev,
-struct drm_framebuffer *fb,
-int *min_downscale, int *max_upscale)
-{
-   struct amdgpu_device *adev = drm_to_adev(dev);
-   struct dc *dc = adev->dm.dc;
-   /* Caps for all supported planes are the same on DCE and DCN 1 - 3 */
-   struct dc_plane_cap *plane_cap = &dc->caps.planes[0];
-
-   switch (fb->format->format) {
-   case DRM_FORMAT_P010:
-   case DRM_FORMAT_NV12:
-   case DRM_FORMAT_NV21:
-   *max_upscale = plane_cap->max_upscale_factor.nv12;
-   *min_downscale = plane_cap->max_downscale_factor.nv12;
-   break;
-
-   case DRM_FORMAT_XRGB16161616F:
-   case DRM_FORMAT_ARGB16161616F:
-   case DRM_FORMAT_XBGR16161616F:
-   case DRM_FORMAT_ABGR16161616F:
-   *max_upscale = plane_cap->max_upscale_factor.fp16;
-   *min_downscale = plane_cap->max_downscale_factor.fp16;
-   break;
-
-   default:
-   *max_upscale = plane_cap->max_upscale_factor.argb;
-   *min_downscale = plane_cap->max_downscale_factor.argb;
-   break;
-   }
-
-   /*
-* A factor of 1 in the plane_cap means to not allow scaling, ie. use a
-* scaling factor of 1.0 == 1000 units.
-   

RE: [PATCH v5] drm/amd/display: Revert W/A for hard hangs on DCN20/DCN21

2022-01-07 Thread Kazlauskas, Nicholas
[AMD Official Use Only]

> -Original Message-
> From: Limonciello, Mario 
> Sent: January 7, 2022 11:50 AM
> To: amd-gfx@lists.freedesktop.org
> Cc: Limonciello, Mario ; Kazlauskas, Nicholas
> ; Zhuo, Qingqing (Lillian)
> ; Scott Bruce ; Chris
> Hixon ; spassw...@web.de
> Subject: [PATCH v5] drm/amd/display: Revert W/A for hard hangs on
> DCN20/DCN21
> Importance: High
>
> The WA from commit 2a50edbf10c8 ("drm/amd/display: Apply w/a for hard
> hang
> on HPD") and commit 1bd3bc745e7f ("drm/amd/display: Extend w/a for hard
> hang on HPD to dcn20") causes a regression in s0ix where the system will
> fail to resume properly on many laptops.  Pull the workarounds out to
> avoid that s0ix regression in the common case.  This HPD hang happens with
> an external device and a new W/A will need to be developed for this in the
> future.
>
> Cc: Kazlauskas Nicholas 
> Cc: Qingqing Zhuo 
> Reported-by: Scott Bruce 
> Reported-by: Chris Hixon 
> Reported-by: spassw...@web.de
> Link: https://bugzilla.kernel.org/show_bug.cgi?id=215436
> Link: https://gitlab.freedesktop.org/drm/amd/-/issues/1821
> Link: https://gitlab.freedesktop.org/drm/amd/-/issues/1852
> Fixes: 2a50edbf10c8 ("drm/amd/display: Apply w/a for hard hang on HPD")
> Fixes: 1bd3bc745e7f ("drm/amd/display: Extend w/a for hard hang on HPD to
> dcn20")
> Signed-off-by: Mario Limonciello 

I think the revert is fine once we figure out where we're missing calls to:

.optimize_pwr_state = dcn21_optimize_pwr_state,
.exit_optimized_pwr_state = dcn21_exit_optimized_pwr_state,

These are already part of dc_link_detect, so I suspect there's another 
interface in DC that should be using these.
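
For reference, the pattern in question is roughly the bracket dc_link_detect() 
already uses around PHY access; any other DC entry point that can touch the PHY 
while displays are off would need the same treatment. A sketch, assuming the 
existing hwseq hooks:

	if (dc->hwss.exit_optimized_pwr_state)
		dc->hwss.exit_optimized_pwr_state(dc, dc->current_state);

	/* ... PHY / DPCD access that needs the refclk running ... */

	if (dc->hwss.optimize_pwr_state)
		dc->hwss.optimize_pwr_state(dc, dc->current_state);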

I think the best way to debug this is to revert the patch locally and add a 
stack dump when DMCUB hangs or times out.

That way you can know where the PHY was trying to be accessed without the 
refclk being on.

We had a similar issue in DCN31 which didn't require a W/A like DCN21.

I'd like to hold off on merging this until that hang is verified as gone.

Regards,
Nicholas Kazlauskas

> ---
>  .../display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c  | 11 +---
>  .../amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c | 11 +---
>  .../display/dc/irq/dcn20/irq_service_dcn20.c  | 25 ---
>  .../display/dc/irq/dcn20/irq_service_dcn20.h  |  2 --
>  .../display/dc/irq/dcn21/irq_service_dcn21.c  | 25 ---
>  .../display/dc/irq/dcn21/irq_service_dcn21.h  |  2 --
>  .../gpu/drm/amd/display/dc/irq/irq_service.c  |  2 +-
>  .../gpu/drm/amd/display/dc/irq/irq_service.h  |  4 ---
>  8 files changed, 3 insertions(+), 79 deletions(-)
>
> diff --git
> a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
> b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
> index 9f35f2e8f971..cac80ba69072 100644
> --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
> +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
> @@ -38,7 +38,6 @@
>  #include "clk/clk_11_0_0_offset.h"
>  #include "clk/clk_11_0_0_sh_mask.h"
>
> -#include "irq/dcn20/irq_service_dcn20.h"
>
>  #undef FN
>  #define FN(reg_name, field_name) \
> @@ -223,8 +222,6 @@ void dcn2_update_clocks(struct clk_mgr
> *clk_mgr_base,
>   bool force_reset = false;
>   bool p_state_change_support;
>   int total_plane_count;
> - int irq_src;
> - uint32_t hpd_state;
>
>   if (dc->work_arounds.skip_clock_update)
>   return;
> @@ -242,13 +239,7 @@ void dcn2_update_clocks(struct clk_mgr
> *clk_mgr_base,
>   if (dc->res_pool->pp_smu)
>   pp_smu = &dc->res_pool->pp_smu->nv_funcs;
>
> - for (irq_src = DC_IRQ_SOURCE_HPD1; irq_src <=
> DC_IRQ_SOURCE_HPD6; irq_src++) {
> - hpd_state = dc_get_hpd_state_dcn20(dc->res_pool->irqs,
> irq_src);
> - if (hpd_state)
> - break;
> - }
> -
> - if (display_count == 0 && !hpd_state)
> + if (display_count == 0)
>   enter_display_off = true;
>
>   if (enter_display_off == safe_to_lower) {
> diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
> b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
> index fbda42313bfe..f4dee0e48a67 100644
> --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
> +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
> @@ -42,7 +42,6 @@
>  #include "clk/clk_10_0_2_sh_mask.h"
>  #include "renoir_ip_offset.h"
>
> -#include "irq/dcn21/irq_service_dcn21.h"
>
>  /* Constants */
>
> @@ -129,11

Re: [PATCH] drm/amd/display: reset dcn31 SMU mailbox on failures

2022-01-07 Thread Kazlauskas, Nicholas

On 2022-01-07 4:40 p.m., Mario Limonciello wrote:

Otherwise future commands may fail as well leading to downstream
problems that look like they stemmed from a timeout the first time
but really didn't.

Signed-off-by: Mario Limonciello 


I guess we used to do this but after we started adding the 
wait_for_response prior to sending the command this was ignored.


Should be fine.

Reviewed-by: Nicholas Kazlauskas 

Regards,
Nicholas Kazlauskas


---
  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c | 6 ++
  1 file changed, 6 insertions(+)

diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c 
b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c
index 8c2b77eb9459..162ae7186124 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c
@@ -119,6 +119,12 @@ int dcn31_smu_send_msg_with_param(
  
  	result = dcn31_smu_wait_for_response(clk_mgr, 10, 20);
  
+	if (result == VBIOSSMC_Result_Failed) {

+   ASSERT(0);
+   REG_WRITE(MP1_SMN_C2PMSG_91, VBIOSSMC_Result_OK);
+   return -1;
+   }
+
if (IS_SMU_TIMEOUT(result)) {
ASSERT(0);
dm_helpers_smu_timeout(CTX, msg_id, param, 10 * 20);




RE: [PATCH v5] drm/amd/display: Revert W/A for hard hangs on DCN20/DCN21

2022-01-14 Thread Kazlauskas, Nicholas
[Public]

> -Original Message-
> From: Limonciello, Mario 
> Sent: January 14, 2022 10:38 AM
> To: Chris Hixon ; Kazlauskas, Nicholas
> ; amd-gfx@lists.freedesktop.org
> Cc: Zhuo, Qingqing (Lillian) ; Scott Bruce
> ; spassw...@web.de
> Subject: RE: [PATCH v5] drm/amd/display: Revert W/A for hard hangs on
> DCN20/DCN21
> Importance: High
>
> [AMD Official Use Only]
>
> > >
> > >
> > >> I think the revert is fine once we figure out where we're missing calls 
> > >> to:
> > >>
> > >>  .optimize_pwr_state = dcn21_optimize_pwr_state,
> > >>  .exit_optimized_pwr_state = dcn21_exit_optimized_pwr_state,
> > >>
> > >> These are already part of dc_link_detect, so I suspect there's another
> > interface
> > >> in DC that should be using these.
> > >>
> > >> I think the best way to debug this is to revert the patch locally and 
> > >> add a
> stack
> > >> dump when DMCUB hangs our times out.
> > > OK so I did this on top of amd-staging-drm-next with my v5 patch (this
> revert in
> > place)
> > >
> > > diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
> > b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
> > > index 9280f2abd973..0bd32f82f3db 100644
> > > --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
> > > +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
> > > @@ -789,8 +789,10 @@ enum dmub_status
> > dmub_srv_cmd_with_reply_data(struct dmub_srv *dmub,
> > >  // Execute command
> > >  status = dmub_srv_cmd_execute(dmub);
> > >
> > > -   if (status != DMUB_STATUS_OK)
> > > +   if (status != DMUB_STATUS_OK) {
> > > +   ASSERT(0);
> > >  return status;
> > > +   }
> > >
> > >  // Wait for DMUB to process command
> > >  status = dmub_srv_wait_for_idle(dmub, 10);
> > >
> > >> That way you can know where the PHY was trying to be accessed
> without the
> > >> refclk being on.
> > >>
> > >> We had a similar issue in DCN31 which didn't require a W/A like DCN21.
> > >>
> > >> I'd like to hold off on merging this until that hang is verified as gone.
> > >>
> > > Then I took a RN laptop running DMUB 0x01010019 and disabled eDP, and
> > confirmed
> > > no CRTC was configured but plugged in an HDMI cable:
> > >
> > > connector[78]: eDP-1
> > >  crtc=(null)
> > >  self_refresh_aware=0
> > > connector[85]: HDMI-A-1
> > >  crtc=crtc-1
> > >  self_refresh_aware=0
> > >
> > > I triggered 100 hotplugs like this:
> > >
> > > #!/bin/bash
> > > for i in {0..100..1}
> > > do
> > >  echo 1 | tee /sys/kernel/debug/dri/0/HDMI-A-1/trigger_hotplug
> > >  sleep 3
> > > done
> > >
> > > Unfortunately, no hang or traceback to be seen (and HDMI continues to
> work).
> > > I also manually pulled the plug a handful of times I don't know the
> specifics
> > that Lillian had the
> > > failure though, so this might not be a good enough check.
> > >
> > > I'll try to upgrade DMUB to 0x101001c (the latest version) and double
> check
> > that as well.
> >
> > I applied patch v5 and the above ASSERT patch, on top of both Linux
> > 5.16-rc8 and 5.16.
> >
> > Result: no problems with suspend/resume, 16+ cycles.
> >
> > As far as the hang goes:
> >
> > I plugged in an HDMI cable connected to my TV, and configured Gnome to
> > use the external display only.
> >
> > connectors from /sys/kernel/debug/dri/0/state:
> >
> > connector[78]: eDP-1
> >  crtc=(null)
> >  self_refresh_aware=0
> > connector[85]: HDMI-A-1
> >  crtc=crtc-1
> >  self_refresh_aware=0
> > connector[89]: DP-1
> >  crtc=(null)
> >  self_refresh_aware=0
> >
> > I manually unplugged/plugged the HDMI cable 16+ times, and also ran:
> >
> > $ sudo sh -c 'for ((i=0;i<100;i++)); do echo 1 | tee
> > /sys/kernel/debug/dri/0/HDMI-A-1/trigger_hotplug; sleep 3; done'
> >
> > The system did not hang, and I saw no kernel log output from the ASSERT.
> >
> > I also tried a USB-C dock with an HDMI port, with the same results,
> > though there are other issues with this (perhaps 

Re: [PATCH] drm/amd/display: Copy crc_skip_count when duplicating CRTC state

2022-01-18 Thread Kazlauskas, Nicholas

On 1/18/2022 11:40 AM, Rodrigo Siqueira wrote:

From: Leo Li 

[Why]
crc_skip_count is used to track how many frames to skip to allow the OTG
CRC engine to "warm up" before it outputs correct CRC values.
Experimentally, this seems to be 2 frames.

When duplicating CRTC states, this value was not copied to the
duplicated state. Therefore, when this state is committed, we will
needlessly wait 2 frames before outputting CRC values, even if the CRC
engine is already warmed up.

[How]
Copy the crc_skip_count as part of dm_crtc_duplicate_state.


This likely introduces regressions.

Here's an example case where it can take two frames even after the CRTC 
is enabled:


1. VUPDATE is before line 0, in the front porch, counter=0
2. Flip arrives before VUPDATE is signaled, but does not finish 
programming until after VUPDATE point, counter=0.

3. Vblank counter increments, counter=1.
4. Flip programming finishes, counter=1.
5. OS delay happens, cursor programming is delayed, counter=1.
6. Cursor programming starts, counter=1.
7. VUPDATE fires, updating frame but missing cursor, counter=1.
8. Cursor programming finishes, counter=2.
9. Cursor programming pending for counter=2.

This is a little contrived, but I've seen something similar happen 
during IGT testing before.


This is because cursor update happens independent of the rest of plane 
programming and is tied to a separate lock. That lock part can't change 
due to potential for stuttering, but the first part could be fixed.


Regards,
Nicholas Kazlauskas



Cc: Mark Yacoub 
Cc: Hayden Goodfellow 
Cc: Harry Wentland 
Cc: Nicholas Choi 

Signed-off-by: Leo Li 
Signed-off-by: Rodrigo Siqueira 
---
  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 1 +
  1 file changed, 1 insertion(+)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 87299e62fe12..5482b0925396 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -6568,6 +6568,7 @@ dm_crtc_duplicate_state(struct drm_crtc *crtc)
state->freesync_config = cur->freesync_config;
state->cm_has_degamma = cur->cm_has_degamma;
state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
+   state->crc_skip_count = cur->crc_skip_count;
state->force_dpms_off = cur->force_dpms_off;
/* TODO Duplicate dc_stream after objects are stream object is 
flattened */
  




Re: [PATCH] drm/amd/display: Cap pflip irqs per max otg number

2022-02-03 Thread Kazlauskas, Nicholas

On 2/3/2022 5:14 PM, roman...@amd.com wrote:

From: Roman Li 

[Why]
pflip interrupts are mapped 1 to 1 to otg id.
e.g. if irq_src=26 corresponds to otg0 then 27->otg1, 28->otg2...

Linux DM registers pflip interrupts per number of crtcs.
In the fused-pipe case the crtc count can be less than the otg id.

e.g. if one pipe out of 3 (otg#0-2) is fused, adev->mode_info.num_crtc=2,
so DM only registers irq_src 26,27.
This is a bug since, if pipe#2 remains unfused, DM never gets the
otg2 pflip interrupt (irq_src=28).
That may result in gfx failures due to pflip timeout.

[How]
Register pflip interrupts per max num of otg instead of num_crtc

Signed-off-by: Roman Li 


Reviewed-by: Nicholas Kazlauskas 

Regards,
Nicholas Kazlauskas


---
  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 2 +-
  drivers/gpu/drm/amd/display/dc/core/dc.c  | 2 ++
  drivers/gpu/drm/amd/display/dc/dc.h   | 1 +
  3 files changed, 4 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 8f53c9f..10ca3fc 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -3646,7 +3646,7 @@ static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
 
 	/* Use GRPH_PFLIP interrupt */
 	for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
-			i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
+			i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + dc->caps.max_otg_num - 1;
 			i++) {
 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
 		if (r) {
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 1d9404f..70a0b89 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -1220,6 +1220,8 @@ struct dc *dc_create(const struct dc_init_data *init_params)
 
 		dc->caps.max_dp_protocol_version = DP_VERSION_1_4;
 
+		dc->caps.max_otg_num = dc->res_pool->res_cap->num_timing_generator;
+
 		if (dc->res_pool->dmcu != NULL)
 			dc->versions.dmcu_version = dc->res_pool->dmcu->dmcu_version;
 	}
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h 
b/drivers/gpu/drm/amd/display/dc/dc.h
index 69d264d..af05877 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -200,6 +200,7 @@ struct dc_caps {
bool edp_dsc_support;
bool vbios_lttpr_aware;
bool vbios_lttpr_enable;
+   uint32_t max_otg_num;
  };
  
  struct dc_bug_wa {




Re: [PATCH] drm/amd/display: take dc_lock in short pulse handler only

2021-05-19 Thread Kazlauskas, Nicholas

On 2021-05-19 4:55 p.m., Aurabindo Pillai wrote:

[Why]
Conditions that end up modifying the global dc state must be locked.
However, during mst allocate payload sequence, lock is already taken.
With StarTech 1.2 DP hub, we get an HPD RX interrupt for a reason other
than to indicate down reply availability right after sending payload
allocation. The handler again takes dc lock before calling the
dc's HPD RX handler. Due to this contention, the DRM thread which waits
for MST down reply never gets a chance to finish its waiting
successfully and ends up timing out. Once the lock is released, the hpd
rx handler fires and goes ahead to read from the MST HUB, but now it's
too late and the HUB doesn't light up all displays since DRM lacks error
handling when payload allocation fails.

[How]
Take lock only if there is a change in link status or if automated test
pattern bit is set. The latter fixes the null pointer dereference when
running certain DP Link Layer Compliance test.

Signed-off-by: Aurabindo Pillai 


Discussed this a bit offline and I'd *really* like the proper interface 
in sooner rather than later.


Conditional locking is almost always a sign of a bug; in this case we 
know it's OK, but someone could change the function underneath later 
without understanding that we're duplicating some of the checking logic 
in the upper layer.


I don't think the code changes enough in this area for this to happen 
(as it's spec based), but please be mindful and consider splitting the 
checking logic (which is thread safe) out from the link loss logic (the 
functional bit, which isn't thread safe).
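
One possible shape for that split is sketched below; hpd_rx_irq_needs_dc_lock() 
is a placeholder name, while hpd_rx_irq_check_link_loss_status() and the 
AUTOMATED_TEST bit are the existing pieces this patch already uses. It still 
gates the lock on the check, but the thread-safe check lives in one helper 
instead of being duplicated inline in the handler:

/* Thread-safe inspection only: decide whether handling this IRQ will need
 * to modify DC state (and therefore needs dc_lock). */
static bool hpd_rx_irq_needs_dc_lock(struct dc_link *link,
				     union hpd_irq_data *data)
{
	return hpd_rx_irq_check_link_loss_status(link, data) ||
	       data->bytes.device_service_irq.bits.AUTOMATED_TEST;
}

	/* Caller side: the lock decision is made once, up front. */
	bool need_lock = !amdgpu_in_reset(adev) &&
			 hpd_rx_irq_needs_dc_lock(dc_link, &hpd_irq_data);

	if (need_lock)
		mutex_lock(&adev->dm.dc_lock);
	result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL);
	if (need_lock)
		mutex_unlock(&adev->dm.dc_lock);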


Reviewed-by: Nicholas Kazlauskas 

Regards,
Nicholas Kazlauskas


---
  .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 19 +--
  .../gpu/drm/amd/display/dc/core/dc_link_dp.c  |  2 +-
  .../gpu/drm/amd/display/dc/inc/dc_link_dp.h   |  4 
  3 files changed, 22 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index e79910cc179c..2c9d099adfc2 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -28,6 +28,7 @@
  
  #include "dm_services_types.h"

  #include "dc.h"
+#include "dc_link_dp.h"
  #include "dc/inc/core_types.h"
  #include "dal_asic_id.h"
  #include "dmub/dmub_srv.h"
@@ -2740,6 +2741,7 @@ static void handle_hpd_rx_irq(void *param)
enum dc_connection_type new_connection_type = dc_connection_none;
struct amdgpu_device *adev = drm_to_adev(dev);
union hpd_irq_data hpd_irq_data;
+   bool lock_flag = 0;
  
  	memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
  
@@ -2769,15 +2771,28 @@ static void handle_hpd_rx_irq(void *param)

}
}
  
-	if (!amdgpu_in_reset(adev)) {

+   /*
+* TODO: We need the lock to avoid touching DC state while it's being
+* modified during automated compliance testing, or when link loss
+* happens. While this should be split into subhandlers and proper
+* interfaces to avoid having to conditionally lock like this in the
+* outer layer, we need this workaround temporarily to allow MST
+* lightup in some scenarios to avoid timeout.
+*/
+   if (!amdgpu_in_reset(adev) &&
+   (hpd_rx_irq_check_link_loss_status(dc_link, &hpd_irq_data) ||
+hpd_irq_data.bytes.device_service_irq.bits.AUTOMATED_TEST)) {
mutex_lock(&adev->dm.dc_lock);
+   lock_flag = 1;
+   }
+
  #ifdef CONFIG_DRM_AMD_DC_HDCP
result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL);
  #else
result = dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL);
  #endif
+   if (!amdgpu_in_reset(adev) && lock_flag)
mutex_unlock(&adev->dm.dc_lock);
-   }
  
  out:

if (result && !is_mst_root_connector) {
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c 
b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index 9e08410bfdfd..32fb9cdbd980 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -2070,7 +2070,7 @@ enum dc_status read_hpd_rx_irq_data(
return retval;
  }
  
-static bool hpd_rx_irq_check_link_loss_status(

+bool hpd_rx_irq_check_link_loss_status(
struct dc_link *link,
union hpd_irq_data *hpd_irq_dpcd_data)
  {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h 
b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
index ffc3f2c63db8..7dd8bca542b9 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
@@ -68,6 +68,10 @@ bool perform_link_training_with_retries(
enum signal_type signal,
bool do_fallback);
  
+bool hpd_rx_irq_check_link_loss_status(

+   struct dc_link *link,
+   union hpd_irq_data *hpd_irq_dpcd_data);
+
  bool is_mst_supported(struct dc_link *link);
  

Re: [PATCH] drm/amdgpu/display: make backlight setting failure messages debug

2021-05-21 Thread Kazlauskas, Nicholas

On 2021-05-21 12:08 a.m., Alex Deucher wrote:

Avoid spamming the log.  The backlight controller on DCN chips
gets powered down when the display is off, so if you attempt to
set the backlight level when the display is off, you'll get this
message.  This isn't a problem as we cache the requested backlight
level if it's adjusted when the display is off and set it again
during modeset.

Signed-off-by: Alex Deucher 
Cc: nicholas.c...@amd.com
Cc: harry.wentl...@amd.com


Reviewed-by: Nicholas Kazlauskas 

Regards,
Nicholas Kazlauskas


---
  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 4 ++--
  1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index b8026c1baf36..c1f7456aeaa0 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -3506,7 +3506,7 @@ static int amdgpu_dm_backlight_set_level(struct 
amdgpu_display_manager *dm,
rc = dc_link_set_backlight_level_nits(link[i], true, 
brightness[i],
AUX_BL_DEFAULT_TRANSITION_TIME_MS);
if (!rc) {
-   DRM_ERROR("DM: Failed to update backlight via AUX on 
eDP[%d]\n", i);
+   DRM_DEBUG("DM: Failed to update backlight via AUX on 
eDP[%d]\n", i);
break;
}
}
@@ -3514,7 +3514,7 @@ static int amdgpu_dm_backlight_set_level(struct 
amdgpu_display_manager *dm,
for (i = 0; i < dm->num_of_edps; i++) {
rc = dc_link_set_backlight_level(dm->backlight_link[i], 
brightness[i], 0);
if (!rc) {
-   DRM_ERROR("DM: Failed to update backlight on 
eDP[%d]\n", i);
+   DRM_DEBUG("DM: Failed to update backlight on 
eDP[%d]\n", i);
break;
}
}





Re: [PATCH 2/2] drm/amdgpu/dc: fix DCN3.1 FP handling

2021-06-04 Thread Kazlauskas, Nicholas

On 2021-06-04 2:16 p.m., Alex Deucher wrote:

Missing proper DC_FP_START/DC_FP_END.

Signed-off-by: Alex Deucher 


Thanks for catching these.

Series is Reviewed-by: Nicholas Kazlauskas

Regards,
Nicholas Kazlauskas


---
  .../drm/amd/display/dc/dcn31/dcn31_resource.c  | 18 +-
  1 file changed, 17 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c 
b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
index af978d2cb25f..0d6cb6caad81 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
@@ -1633,7 +1633,7 @@ static void dcn31_update_soc_for_wm_a(struct dc *dc, 
struct dc_state *context)
}
  }
  
-static void dcn31_calculate_wm_and_dlg(

+static void dcn31_calculate_wm_and_dlg_fp(
struct dc *dc, struct dc_state *context,
display_e2e_pipe_params_st *pipes,
int pipe_cnt,
@@ -1759,6 +1759,17 @@ static void dcn31_calculate_wm_and_dlg(
dcn20_calculate_dlg_params(dc, context, pipes, pipe_cnt, vlevel);
  }
  
+static void dcn31_calculate_wm_and_dlg(

+   struct dc *dc, struct dc_state *context,
+   display_e2e_pipe_params_st *pipes,
+   int pipe_cnt,
+   int vlevel)
+{
+   DC_FP_START();
+   dcn31_calculate_wm_and_dlg_fp(dc, context, pipes, pipe_cnt, vlevel);
+   DC_FP_END();
+}
+
  static struct dc_cap_funcs cap_funcs = {
.get_dcc_compression_cap = dcn20_get_dcc_compression_cap
  };
@@ -1890,6 +1901,8 @@ static bool dcn31_resource_construct(
struct dc_context *ctx = dc->ctx;
struct irq_service_init_data init_data;
  
+	DC_FP_START();

+
ctx->dc_bios->regs = &bios_regs;
  
  	pool->base.res_cap = &res_cap_dcn31;

@@ -2152,10 +2165,13 @@ static bool dcn31_resource_construct(
  
  	dc->cap_funcs = cap_funcs;
  
+	DC_FP_END();

+
return true;
  
  create_fail:
  
+	DC_FP_END();

dcn31_resource_destruct(pool);
  
  	return false;






Re: [PATCH] drm/amdgpu/display: fold DRM_AMD_DC_DCN3_1 into DRM_AMD_DC_DCN

2021-06-21 Thread Kazlauskas, Nicholas

On 2021-06-21 4:58 p.m., Alex Deucher wrote:

No need for a separate flag now that DCN3.1 is not in bring up.
Fold into DRM_AMD_DC_DCN like previous DCN IPs.

Signed-off-by: Alex Deucher 


Reviewed-by: Nicholas Kazlauskas 

Regards,
Nicholas Kazlauskas


---
  drivers/gpu/drm/amd/display/Kconfig   |  7 --
  .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 22 +--
  .../amd/display/amdgpu_dm/amdgpu_dm_hdcp.c|  4 
  drivers/gpu/drm/amd/display/dc/Makefile   |  2 --
  .../drm/amd/display/dc/bios/bios_parser2.c|  7 +-
  .../display/dc/bios/command_table_helper2.c   |  6 +
  .../gpu/drm/amd/display/dc/clk_mgr/Makefile   |  2 --
  .../gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c  |  7 --
  .../display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c  |  2 --
  drivers/gpu/drm/amd/display/dc/core/dc.c  |  8 +++
  drivers/gpu/drm/amd/display/dc/core/dc_link.c |  6 ++---
  .../gpu/drm/amd/display/dc/core/dc_resource.c | 10 ++---
  .../gpu/drm/amd/display/dc/core/dc_stream.c   |  4 
  drivers/gpu/drm/amd/display/dc/dc.h   | 14 +---
  drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c  |  3 +--
  drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h  |  3 +--
  .../gpu/drm/amd/display/dc/dce/dce_hwseq.h|  6 -
  .../display/dc/dce110/dce110_hw_sequencer.c   |  4 ++--
  .../drm/amd/display/dc/dcn10/dcn10_hubbub.h   |  9 +---
  .../amd/display/dc/dcn10/dcn10_link_encoder.h |  9 +---
  .../gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h |  8 ---
  .../drm/amd/display/dc/dcn20/dcn20_hubbub.h   |  2 --
  .../gpu/drm/amd/display/dc/dcn20/dcn20_hubp.h | 10 -
  .../drm/amd/display/dc/dcn20/dcn20_hwseq.c| 19 +++-
  .../drm/amd/display/dc/dcn20/dcn20_resource.c | 16 --
  .../drm/amd/display/dc/dcn30/dcn30_hwseq.c|  2 --
  .../drm/amd/display/dc/dcn31/dcn31_hwseq.c|  2 --
  drivers/gpu/drm/amd/display/dc/dm_cp_psp.h|  2 --
  drivers/gpu/drm/amd/display/dc/dml/Makefile   |  6 -
  .../dc/dml/dcn31/display_mode_vba_31.c|  2 --
  .../dc/dml/dcn31/display_rq_dlg_calc_31.c |  3 ---
  .../drm/amd/display/dc/dml/display_mode_lib.c |  9 ++--
  .../drm/amd/display/dc/dml/display_mode_lib.h |  2 --
  .../amd/display/dc/dml/display_mode_structs.h |  4 
  .../drm/amd/display/dc/dml/display_mode_vba.c | 12 --
  .../drm/amd/display/dc/dml/display_mode_vba.h |  6 -
  .../gpu/drm/amd/display/dc/gpio/hw_factory.c  |  2 --
  .../drm/amd/display/dc/gpio/hw_translate.c|  2 --
  .../gpu/drm/amd/display/dc/inc/core_types.h   |  6 -
  .../gpu/drm/amd/display/dc/inc/hw/clk_mgr.h   |  2 --
  drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h  |  6 -
  .../gpu/drm/amd/display/dc/inc/hw/dchubbub.h  |  2 --
  .../drm/amd/display/dc/inc/hw/link_encoder.h  | 14 +---
  .../gpu/drm/amd/display/dc/inc/hw/mem_input.h |  2 --
  .../amd/display/dc/inc/hw/timing_generator.h  |  2 --
  .../gpu/drm/amd/display/dc/inc/hw_sequencer.h |  2 --
  drivers/gpu/drm/amd/display/dc/irq/Makefile   |  2 --
  .../display/dc/irq/dcn31/irq_service_dcn31.h  |  3 ---
  drivers/gpu/drm/amd/display/dmub/dmub_srv.h   |  8 ---
  .../gpu/drm/amd/display/dmub/inc/dmub_cmd.h   | 14 +---
  drivers/gpu/drm/amd/display/dmub/src/Makefile |  6 +
  .../gpu/drm/amd/display/dmub/src/dmub_srv.c   |  4 
  .../gpu/drm/amd/display/include/dal_asic_id.h |  2 --
  .../gpu/drm/amd/display/include/dal_types.h   |  2 --
  .../drm/amd/display/modules/hdcp/hdcp_log.c   |  2 --
  .../drm/amd/display/modules/hdcp/hdcp_psp.c   | 18 ---
  .../drm/amd/display/modules/hdcp/hdcp_psp.h   | 13 ++-
  .../drm/amd/display/modules/inc/mod_hdcp.h| 10 -
  58 files changed, 45 insertions(+), 319 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/Kconfig 
b/drivers/gpu/drm/amd/display/Kconfig
index 5b5f36c80efb..7dffc04a557e 100644
--- a/drivers/gpu/drm/amd/display/Kconfig
+++ b/drivers/gpu/drm/amd/display/Kconfig
@@ -31,13 +31,6 @@ config DRM_AMD_DC_SI
  by default. This includes Tahiti, Pitcairn, Cape Verde, Oland.
  Hainan is not supported by AMD DC and it has no physical DCE6.
  
-config DRM_AMD_DC_DCN3_1

-bool "DCN 3.1 family"
-depends on DRM_AMD_DC_DCN
-help
-Choose this option if you want to have
-DCN3.1 family support for display engine
-
  config DEBUG_KERNEL_DC
bool "Enable kgdb break in DC"
depends on DRM_AMD_DC
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index d069661abe45..b5b5ccf0ed71 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -110,10 +110,8 @@ MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
  MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
  #define FIRMWARE_BEIGE_GOBY_DMUB "amdgpu/beige_goby_dmcub.bin"
  MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY_DMUB);
-#if defined(CONFIG_DRM_AMD

Re: [PATCH] drm/amd/display: use correct scale for actual_brightness

2020-08-04 Thread Kazlauskas, Nicholas
This is a cleaner change than the other proposed patch since it doesn't need 
to modify the existing conversion functions, but I'd be worried about broken 
userspace relying on 0-255 as the only acceptable range.


I'm not enough of an expert on existing implementations to point out a 
specific one, though.


Regards,
Nicholas Kazlauskas

On 2020-08-03 4:02 p.m., Alexander Monakov wrote:

Documentation for sysfs backlight level interface requires that
values in both 'brightness' and 'actual_brightness' files are
interpreted to be in range from 0 to the value given in the
'max_brightness' file.

With amdgpu, max_brightness gives 255, and values written by the user
into 'brightness' are internally rescaled to a wider range. However,
reading from 'actual_brightness' gives the raw register value without
inverse rescaling. This causes issues for various userspace tools such
as PowerTop and systemd that expect the value to be in the correct
range.

Introduce a helper to retrieve internal backlight range. Extend the
existing 'convert_brightness' function to handle conversion in both
directions.

Bug: https://bugzilla.kernel.org/show_bug.cgi?id=203905
Bug: https://gitlab.freedesktop.org/drm/amd/-/issues/1242
Cc: Alex Deucher 
Signed-off-by: Alexander Monakov 
---
  .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 73 ---
  1 file changed, 32 insertions(+), 41 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 710edc70e37e..03e21e7b7917 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -2881,51 +2881,42 @@ static int set_backlight_via_aux(struct dc_link *link, 
uint32_t brightness)
return rc ? 0 : 1;
  }
  
-static u32 convert_brightness(const struct amdgpu_dm_backlight_caps *caps,

- const uint32_t user_brightness)
+static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
+   unsigned *min, unsigned *max)
  {
-   u32 min, max, conversion_pace;
-   u32 brightness = user_brightness;
-
if (!caps)
-   goto out;
+   return 0;
  
-	if (!caps->aux_support) {

-   max = caps->max_input_signal;
-   min = caps->min_input_signal;
-   /*
-* The brightness input is in the range 0-255
-* It needs to be rescaled to be between the
-* requested min and max input signal
-* It also needs to be scaled up by 0x101 to
-* match the DC interface which has a range of
-* 0 to 0x
-*/
-   conversion_pace = 0x101;
-   brightness =
-   user_brightness
-   * conversion_pace
-   * (max - min)
-   / AMDGPU_MAX_BL_LEVEL
-   + min * conversion_pace;
+   if (caps->aux_support) {
+   // Firmware limits are in nits, DC API wants millinits.
+   *max = 1000 * caps->aux_max_input_signal;
+   *min = 1000 * caps->aux_min_input_signal;
} else {
-   /* TODO
-* We are doing a linear interpolation here, which is OK but
-* does not provide the optimal result. We probably want
-* something close to the Perceptual Quantizer (PQ) curve.
-*/
-   max = caps->aux_max_input_signal;
-   min = caps->aux_min_input_signal;
-
-   brightness = (AMDGPU_MAX_BL_LEVEL - user_brightness) * min
-  + user_brightness * max;
-   // Multiple the value by 1000 since we use millinits
-   brightness *= 1000;
-   brightness = DIV_ROUND_CLOSEST(brightness, AMDGPU_MAX_BL_LEVEL);
+   // Firmware limits are 8-bit, PWM control is 16-bit.
+   *max = 0x101 * caps->max_input_signal;
+   *min = 0x101 * caps->min_input_signal;
}
+   return 1;
+}
  
-out:

-   return brightness;
+static u32 convert_brightness(const struct amdgpu_dm_backlight_caps *caps,
+ const uint32_t brightness, int from_user)
+{
+   unsigned min, max;
+
+   if (!get_brightness_range(caps, &min, &max))
+   return brightness;
+
+   if (from_user)
+   // Rescale 0..255 to min..max
+   return min + DIV_ROUND_CLOSEST((max - min) * brightness,
+  AMDGPU_MAX_BL_LEVEL);
+
+   if (brightness < min)
+   return 0;
+   // Rescale min..max to 0..255
+   return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
+max - min);
  }
  
  static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)

@@ -2941,7 +2932,7 @@ static int amdgpu_dm_back

Re: [PATCH] drm/amd/display: use correct scale for actual_brightness

2020-08-04 Thread Kazlauskas, Nicholas

On 2020-08-03 4:02 p.m., Alexander Monakov wrote:

Documentation for sysfs backlight level interface requires that
values in both 'brightness' and 'actual_brightness' files are
interpreted to be in range from 0 to the value given in the
'max_brightness' file.

With amdgpu, max_brightness gives 255, and values written by the user
into 'brightness' are internally rescaled to a wider range. However,
reading from 'actual_brightness' gives the raw register value without
inverse rescaling. This causes issues for various userspace tools such
as PowerTop and systemd that expect the value to be in the correct
range.

Introduce a helper to retrieve internal backlight range. Extend the
existing 'convert_brightness' function to handle conversion in both
directions.

Bug: https://bugzilla.kernel.org/show_bug.cgi?id=203905
Bug: https://gitlab.freedesktop.org/drm/amd/-/issues/1242
Cc: Alex Deucher 
Signed-off-by: Alexander Monakov 


Overall approach seems reasonable, nice catch.

I suggest adding convert_to_user_brightness() instead of making 
from_user a flag and extending the current functionality, though. It 
makes it clearer from the call site what's happening.


Regards,
Nicholas Kazlauskas
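
For illustration, here is a minimal sketch of the suggested split. The helper
names and bodies below track the v2 patch that appears later in this archive,
so the exact details are an approximation:

    static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
                                            uint32_t brightness)
    {
            unsigned min, max;

            if (!get_brightness_range(caps, &min, &max))
                    return brightness;

            /* Rescale 0..AMDGPU_MAX_BL_LEVEL to min..max */
            return min + DIV_ROUND_CLOSEST((max - min) * brightness,
                                           AMDGPU_MAX_BL_LEVEL);
    }

    static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
                                          uint32_t brightness)
    {
            unsigned min, max;

            if (!get_brightness_range(caps, &min, &max))
                    return brightness;

            if (brightness < min)
                    return 0;

            /* Rescale min..max back to 0..AMDGPU_MAX_BL_LEVEL */
            return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
                                     max - min);
    }

The update_status path would then call convert_brightness_from_user() and the
get_brightness path convert_brightness_to_user(), keeping the direction of the
conversion obvious at each call site.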


---
  .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 73 ---
  1 file changed, 32 insertions(+), 41 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 710edc70e37e..03e21e7b7917 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -2881,51 +2881,42 @@ static int set_backlight_via_aux(struct dc_link *link, 
uint32_t brightness)
return rc ? 0 : 1;
  }
  
-static u32 convert_brightness(const struct amdgpu_dm_backlight_caps *caps,

- const uint32_t user_brightness)
+static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
+   unsigned *min, unsigned *max)
  {
-   u32 min, max, conversion_pace;
-   u32 brightness = user_brightness;
-
if (!caps)
-   goto out;
+   return 0;
  
-	if (!caps->aux_support) {

-   max = caps->max_input_signal;
-   min = caps->min_input_signal;
-   /*
-* The brightness input is in the range 0-255
-* It needs to be rescaled to be between the
-* requested min and max input signal
-* It also needs to be scaled up by 0x101 to
-* match the DC interface which has a range of
-* 0 to 0xFFFF
-*/
-   conversion_pace = 0x101;
-   brightness =
-   user_brightness
-   * conversion_pace
-   * (max - min)
-   / AMDGPU_MAX_BL_LEVEL
-   + min * conversion_pace;
+   if (caps->aux_support) {
+   // Firmware limits are in nits, DC API wants millinits.
+   *max = 1000 * caps->aux_max_input_signal;
+   *min = 1000 * caps->aux_min_input_signal;
} else {
-   /* TODO
-* We are doing a linear interpolation here, which is OK but
-* does not provide the optimal result. We probably want
-* something close to the Perceptual Quantizer (PQ) curve.
-*/
-   max = caps->aux_max_input_signal;
-   min = caps->aux_min_input_signal;
-
-   brightness = (AMDGPU_MAX_BL_LEVEL - user_brightness) * min
-  + user_brightness * max;
-   // Multiple the value by 1000 since we use millinits
-   brightness *= 1000;
-   brightness = DIV_ROUND_CLOSEST(brightness, AMDGPU_MAX_BL_LEVEL);
+   // Firmware limits are 8-bit, PWM control is 16-bit.
+   *max = 0x101 * caps->max_input_signal;
+   *min = 0x101 * caps->min_input_signal;
}
+   return 1;
+}
  
-out:

-   return brightness;
+static u32 convert_brightness(const struct amdgpu_dm_backlight_caps *caps,
+ const uint32_t brightness, int from_user)
+{
+   unsigned min, max;
+
+   if (!get_brightness_range(caps, &min, &max))
+   return brightness;
+
+   if (from_user)
+   // Rescale 0..255 to min..max
+   return min + DIV_ROUND_CLOSEST((max - min) * brightness,
+  AMDGPU_MAX_BL_LEVEL);
+
+   if (brightness < min)
+   return 0;
+   // Rescale min..max to 0..255
+   return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
+max - min);
  }
  
  static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)

@@ -2941,7 +2932,7 @@ static int amdgpu_dm_backlight_update_status(struct 
backlight_devi

Re: [PATCH] drm/amd/display: use correct scale for actual_brightness

2020-08-04 Thread Kazlauskas, Nicholas

On 2020-08-04 12:28 p.m., Alexander Monakov wrote:



On Tue, 4 Aug 2020, Kazlauskas, Nicholas wrote:


This is a cleaner change than the other proposed patch since it doesn't need to


Can you give a URL to the other patch please?


Sorry, replied to the wrong email by accident here.

The other change was modifying the max_brightness range and rescaling 
internal min/max defaults.


I don't think it was sent out to the list yet.

Regards,
Nicholas Kazlauskas




modify the existing conversion functions but I'd be worried about broken
userspace relying on 0-255 as the only acceptable range.


Not sure what you mean by this. Userspace simply reads the maximum value from
max_brightness sysfs file. On other gpu/firmware combinations it can be 7 or 9
for example, it just happens to be 255 with modern amdgpu. Minimum value is
always zero.

Value seen in max_brightness remains 255 with this patch, so as far as userspace
is concerned nothing is changed apart from value given by actual_brightness 
file.

Alexander



Not an expert on existing implementations to point out a specific one though.

Regards,
Nicholas Kazlauskas
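
To ground the userspace expectation being discussed, here is a minimal sketch of
what a tool like PowerTop or systemd effectively does when it polls the
backlight. The sysfs paths and the amdgpu_bl0 device name are assumptions for
illustration:

    #include <stdio.h>

    #define BL_DIR "/sys/class/backlight/amdgpu_bl0"

    static unsigned int read_sysfs_uint(const char *path)
    {
            unsigned int val = 0;
            FILE *f = fopen(path, "r");

            if (f) {
                    if (fscanf(f, "%u", &val) != 1)
                            val = 0;
                    fclose(f);
            }
            return val;
    }

    int main(void)
    {
            unsigned int max = read_sysfs_uint(BL_DIR "/max_brightness");     /* 255 on amdgpu */
            unsigned int cur = read_sysfs_uint(BL_DIR "/actual_brightness");

            /* Tools assume 0 <= cur <= max; without the fix, cur can be a raw 16-bit value. */
            printf("backlight: %u / %u\n", cur, max);
            return 0;
    }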

On 2020-08-03 4:02 p.m., Alexander Monakov wrote:

Documentation for sysfs backlight level interface requires that
values in both 'brightness' and 'actual_brightness' files are
interpreted to be in range from 0 to the value given in the
'max_brightness' file.

With amdgpu, max_brightness gives 255, and values written by the user
into 'brightness' are internally rescaled to a wider range. However,
reading from 'actual_brightness' gives the raw register value without
inverse rescaling. This causes issues for various userspace tools such
as PowerTop and systemd that expect the value to be in the correct
range.

Introduce a helper to retrieve internal backlight range. Extend the
existing 'convert_brightness' function to handle conversion in both
directions.

Bug: https://bugzilla.kernel.org/show_bug.cgi?id=203905
Bug: https://gitlab.freedesktop.org/drm/amd/-/issues/1242
Cc: Alex Deucher 
Signed-off-by: Alexander Monakov 
---
   .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 73 ---
   1 file changed, 32 insertions(+), 41 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 710edc70e37e..03e21e7b7917 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -2881,51 +2881,42 @@ static int set_backlight_via_aux(struct dc_link
*link, uint32_t brightness)
return rc ? 0 : 1;
   }
   -static u32 convert_brightness(const struct amdgpu_dm_backlight_caps
*caps,
- const uint32_t user_brightness)
+static int get_brightness_range(const struct amdgpu_dm_backlight_caps
*caps,
+   unsigned *min, unsigned *max)
   {
-   u32 min, max, conversion_pace;
-   u32 brightness = user_brightness;
-
if (!caps)
-   goto out;
+   return 0;
   -if (!caps->aux_support) {
-   max = caps->max_input_signal;
-   min = caps->min_input_signal;
-   /*
-* The brightness input is in the range 0-255
-* It needs to be rescaled to be between the
-* requested min and max input signal
-* It also needs to be scaled up by 0x101 to
-* match the DC interface which has a range of
-* 0 to 0xFFFF
-*/
-   conversion_pace = 0x101;
-   brightness =
-   user_brightness
-   * conversion_pace
-   * (max - min)
-   / AMDGPU_MAX_BL_LEVEL
-   + min * conversion_pace;
+   if (caps->aux_support) {
+   // Firmware limits are in nits, DC API wants millinits.
+   *max = 1000 * caps->aux_max_input_signal;
+   *min = 1000 * caps->aux_min_input_signal;
} else {
-   /* TODO
-* We are doing a linear interpolation here, which is OK but
-* does not provide the optimal result. We probably want
-* something close to the Perceptual Quantizer (PQ) curve.
-*/
-   max = caps->aux_max_input_signal;
-   min = caps->aux_min_input_signal;
-
-   brightness = (AMDGPU_MAX_BL_LEVEL - user_brightness) * min
-  + user_brightness * max;
-   // Multiple the value by 1000 since we use millinits
-   brightness *= 1000;
-   brightness = DIV_ROUND_CLOSEST(brightness,
AMDGPU_MAX_BL_LEVEL);
+   // Firmware limits are 8-bit, PWM control is 16-bit.
+   *max = 0x101 * caps->max_input_signal;
+   *min = 0x101 * 

Re: [PATCH 7/7] drm/amd/display: Replace DRM private objects with subclassed DRM atomic state

2020-08-06 Thread Kazlauskas, Nicholas

On 2020-08-05 4:37 p.m., Rodrigo Siqueira wrote:

Hi,

I have some minor inline comments, but everything looks fine when I
tested it on Raven; feel free to add

Tested-by: Rodrigo Siqueira 

in the whole series.


Thanks for the reviews!

I can clean up the nitpicks for this patch and make a v2.

Regards,
Nicholas Kazlauskas



On 07/30, Nicholas Kazlauskas wrote:

[Why]
DM atomic check was structured in a way that we required old DC state
in order to dynamically add and remove planes and streams from the
context to build the DC state context for validation.

DRM private objects were used to carry over the last DC state and
were added to the context on nearly every commit - regardless of fast
or full so we could check whether or not the new state could affect
bandwidth.

The problem with this model is that DRM private objects do not
implicitly stall out other commits.

So if you have two commits touching separate DRM objects they could
run concurrently and potentially execute out of order - leading to a
use-after-free.

If we want this to be safe we have two options:
1. Stall out concurrent commits since they touch the same private object
2. Refactor DM to not require old DC state and drop private object usage

[How]
This implements approach #2 since it still allows for judder free
updates in multi-display scenarios.

I'll list the big changes in order at a high level:

1. Subclass DRM atomic state instead of using DRM private objects.

DC relied on the old state to determine which changes cause bandwidth
updates but now we have DM perform similar checks based on DRM state
instead - dropping the requirement for old state to exist at all.

This means that we can now build a new DC context from scratch whenever
we have something that DM thinks could affect bandwidth.

Whenever we need to rebuild bandwidth we now add all CRTCs and planes
to the DRM state in order to get the absolute set of DC streams and
DC planes.

This introduces a stall on other commits, but this stall already
exists because of the lock_and_validation logic and it's necessary
since updates may move around pipes and require full reprogramming.
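
As a rough sketch of the mechanism point 1 relies on: the driver provides its own
allocation hooks through the standard drm_mode_config_funcs interface, and every
atomic state handed out by the core is then the subclassed type. How exactly
amdgpu wires this up is an assumption here, not a quote from the patch:

    static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
            /* ... existing .fb_create / .atomic_check / .atomic_commit hooks ... */
            .atomic_state_alloc = dm_atomic_state_alloc,
            .atomic_state_free  = dm_atomic_state_free,
            /*
             * If no .atomic_state_clear hook is provided, the core falls back
             * to drm_atomic_state_default_clear().
             */
    };

Any code that receives a struct drm_atomic_state from the core can then downcast
it with to_dm_atomic_state() to reach the dc_state context.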

2. Drop workarounds to add planes to maintain z-order early in atomic
check. This is no longer needed because of the changes for (1).

This also involves fixing up should_plane_reset checks since we can just
avoid resetting streams and planes when they haven't actually changed.

3. Rework dm_update_crtc_state and dm_update_plane_state to be single
pass instead of two pass.

This is necessary since we no longer have the dc_state to add and
remove planes to the context in and we want to defer creation to the
end of commit_check.

It also makes the logic a lot simpler to follow, as an added bonus.

Cc: Bhawanpreet Lakha 
Cc: Harry Wentland 
Cc: Leo Li 
Cc: Daniel Vetter 
Signed-off-by: Nicholas Kazlauskas 
---
  .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 720 +++---
  .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h |  11 +-
  2 files changed, 280 insertions(+), 451 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 59829ec81694..97a7dfc620e8 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -1839,7 +1839,6 @@ static int dm_resume(void *handle)
struct drm_plane *plane;
struct drm_plane_state *new_plane_state;
struct dm_plane_state *dm_new_plane_state;
-   struct dm_atomic_state *dm_state = 
to_dm_atomic_state(dm->atomic_obj.state);
enum dc_connection_type new_connection_type = dc_connection_none;
struct dc_state *dc_state;
int i, r, j;
@@ -1879,11 +1878,6 @@ static int dm_resume(void *handle)
  
  		return 0;

}
-   /* Recreate dc_state - DC invalidates it when setting power state to 
S3. */
-   dc_release_state(dm_state->context);
-   dm_state->context = dc_create_state(dm->dc);
-   /* TODO: Remove dc_state->dccg, use dc->dccg directly. */
-   dc_resource_state_construct(dm->dc, dm_state->context);
  
  	/* Before powering on DC we need to re-initialize DMUB. */

r = dm_dmub_hw_init(adev);
@@ -2019,11 +2013,51 @@ const struct amdgpu_ip_block_version dm_ip_block =
   * *WIP*
   */
  
+struct drm_atomic_state *dm_atomic_state_alloc(struct drm_device *dev)

+{
+   struct dm_atomic_state *dm_state;
+
+   dm_state = kzalloc(sizeof(*dm_state), GFP_KERNEL);


How about using GFP_ATOMIC here?


+
+   if (!dm_state)
+   return NULL;
+
+   if (drm_atomic_state_init(dev, &dm_state->base) < 0) {
+   kfree(dm_state);
+   return NULL;
+   }
+
+   return &dm_state->base;
+}
+
+void dm_atomic_state_free(struct drm_atomic_state *state)
+{
+   struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
+
+   if (dm_state->context) {
+   dc_release_state(dm_state->context);
+

Re: [PATCH 2/7] drm/amd/display: Reset plane when tiling flags change

2020-08-06 Thread Kazlauskas, Nicholas

On 2020-08-05 5:11 p.m., Rodrigo Siqueira wrote:

On 07/30, Nicholas Kazlauskas wrote:

[Why]
Enabling or disable DCC or switching between tiled and linear formats
can require bandwidth updates.

They're currently skipping all DC validation by being treated as purely
surface updates.

[How]
Treat tiling_flag changes (which encode DCC state) as a condition for
resetting the plane.

Cc: Bhawanpreet Lakha 
Cc: Rodrigo Siqueira 
Cc: Hersen Wu 
Signed-off-by: Nicholas Kazlauskas 
---
  .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 19 ---
  1 file changed, 16 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 7cc5ab90ce13..bf1881bd492c 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -8332,6 +8332,8 @@ static bool should_reset_plane(struct drm_atomic_state 
*state,
 * TODO: Come up with a more elegant solution for this.
 */
for_each_oldnew_plane_in_state(state, other, old_other_state, 
new_other_state, i) {
+   struct dm_plane_state *old_dm_plane_state, *new_dm_plane_state;
+
if (other->type == DRM_PLANE_TYPE_CURSOR)
continue;
  
@@ -8342,9 +8344,20 @@ static bool should_reset_plane(struct drm_atomic_state *state,

if (old_other_state->crtc != new_other_state->crtc)
return true;
  
-		/* TODO: Remove this once we can handle fast format changes. */

-   if (old_other_state->fb && new_other_state->fb &&
-   old_other_state->fb->format != new_other_state->fb->format)
+   /* Framebuffer checks fall at the end. */
+   if (!old_other_state->fb || !new_other_state->fb)
+   continue;
+
+   /* Pixel format changes can require bandwidth updates. */
+   if (old_other_state->fb->format != new_other_state->fb->format)
+   return true;
+
+   old_dm_plane_state = to_dm_plane_state(old_other_state);
+   new_dm_plane_state = to_dm_plane_state(new_other_state);
+
+   /* Tiling and DCC changes also require bandwidth updates. */
+   if (old_dm_plane_state->tiling_flags !=
+   new_dm_plane_state->tiling_flags)


Why not add a case when we move to a TMZ area?

Reviewed-by: Rodrigo Siqueira 


TMZ doesn't affect DML calculations or validation in this case so we can 
safely skip it.


Regards,
Nicholas Kazlauskas




return true;
}
  
--

2.25.1







Re: [PATCH 5/7] drm/amd/display: Reset plane for anything that's not a FAST update

2020-08-06 Thread Kazlauskas, Nicholas

On 2020-08-05 4:45 p.m., Rodrigo Siqueira wrote:

On 07/30, Nicholas Kazlauskas wrote:

[Why]
MEDIUM or FULL updates can require global validation or affect
bandwidth. By treating these all simply as surface updates we aren't
actually passing this through DC global validation.

[How]
There's currently no way to pass surface updates through DC global
validation, nor do I think it's a good idea to change the interface
to accept these.

DC global validation itself is currently stateless, and we can move
our update type checking to be stateless as well by duplicating DC
surface checks in DM based on DRM properties.

We wanted to rely on DC automatically determining this since DC knows
best, but DM is ultimately what fills in everything into DC plane
state so it does need to know as well.

There are basically only three paths that we exercise in DM today:

1) Cursor (async update)
2) Pageflip (fast update)
3) Full pipe programming (medium/full updates)

Which means that anything that's more than a pageflip really needs to
go down path #3.

So this change duplicates all the surface update checks based on DRM
state instead inside of should_reset_plane().

Next step is dropping dm_determine_update_type_for_commit and we no
longer require the old DC state at all for global validation.

Optimization can come later so we don't reset DC planes at all for
MEDIUM updates and avoid validation, but we might require some extra
checks in DM to achieve this.


How about adding this optimization description to our TODO list
under the display folder?

Reviewed-by: Rodrigo Siqueira 


Sure, I'll make another patch to clean up some of the TODO items in the 
text file.


Regards,
Nicholas Kazlauskas

  

Cc: Bhawanpreet Lakha 
Cc: Hersen Wu 
Signed-off-by: Nicholas Kazlauskas 
---
  .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 25 +++
  1 file changed, 25 insertions(+)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 0d5f45742bb5..2cbb29199e61 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -8336,6 +8336,31 @@ static bool should_reset_plane(struct drm_atomic_state 
*state,
if (old_other_state->crtc != new_other_state->crtc)
return true;
  
+		/* Src/dst size and scaling updates. */

+   if (old_other_state->src_w != new_other_state->src_w ||
+   old_other_state->src_h != new_other_state->src_h ||
+   old_other_state->crtc_w != new_other_state->crtc_w ||
+   old_other_state->crtc_h != new_other_state->crtc_h)
+   return true;
+
+   /* Rotation / mirroring updates. */
+   if (old_other_state->rotation != new_other_state->rotation)
+   return true;
+
+   /* Blending updates. */
+   if (old_other_state->pixel_blend_mode !=
+   new_other_state->pixel_blend_mode)
+   return true;
+
+   /* Alpha updates. */
+   if (old_other_state->alpha != new_other_state->alpha)
+   return true;
+
+   /* Colorspace changes. */
+   if (old_other_state->color_range != 
new_other_state->color_range ||
+   old_other_state->color_encoding != 
new_other_state->color_encoding)
+   return true;
+
/* Framebuffer checks fall at the end. */
if (!old_other_state->fb || !new_other_state->fb)
continue;
--
2.25.1







Re: [PATCH 5/7] drm/amd/display: Reset plane for anything that's not a FAST update

2020-08-07 Thread Kazlauskas, Nicholas

On 2020-08-07 4:34 a.m., dan...@ffwll.ch wrote:

On Thu, Jul 30, 2020 at 04:36:40PM -0400, Nicholas Kazlauskas wrote:

[Why]
MEDIUM or FULL updates can require global validation or affect
bandwidth. By treating these all simply as surface updates we aren't
actually passing this through DC global validation.

[How]
There's currently no way to pass surface updates through DC global
validation, nor do I think it's a good idea to change the interface
to accept these.

DC global validation itself is currently stateless, and we can move
our update type checking to be stateless as well by duplicating DC
surface checks in DM based on DRM properties.

We wanted to rely on DC automatically determining this since DC knows
best, but DM is ultimately what fills in everything into DC plane
state so it does need to know as well.

There are basically only three paths that we exercise in DM today:

1) Cursor (async update)
2) Pageflip (fast update)
3) Full pipe programming (medium/full updates)

Which means that anything that's more than a pageflip really needs to
go down path #3.

So this change duplicates all the surface update checks based on DRM
state instead inside of should_reset_plane().

Next step is dropping dm_determine_update_type_for_commit and we no
longer require the old DC state at all for global validation.


I think we do something similar in i915, where we have a "nothing except
base address changed" fast path, but for anything else we fully compute a
new state. Obviously you should try to keep global state synchronization
to a minimum for this step, so it's not entirely only 2 options.

Once we have the states, we compare them and figure out whether we can get
away with a fast modeset operation (maybe what you guys call medium
update). Anyway I think being slightly more aggressive with computing full
state, and then falling back to more optimized update again is a good
approach. Only risk is if we you have too much synchronization in your
locking (e.g. modern compositors do like to change tiling and stuff,
especially once you have modifiers enabled, so this shouldn't cause a sync
across crtc except when absolutely needed).
-Daniel


Sounds like the right approach then.

We can support tiling changes in the fast path, but the more optimized 
version of that last check is really linear <-> tiled. That requires 
global validation with DC to revalidate bandwidth and calculate 
requestor parameters for HW. So we'll have to stall for some of these 
changes unfortunately since we need the full HW state for validation.


Regards,
Nicholas Kazlauskas
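
A hedged sketch of the fast-versus-full classification Daniel describes above:
the checks below mirror the ones this patch adds to should_reset_plane(), but
the function itself and its exact shape are illustrative assumptions, not i915
or DC code:

    /* Can this plane update take the "only the base address changed" fast path? */
    static bool plane_update_is_fast(const struct drm_plane_state *old_state,
                                     const struct drm_plane_state *new_state)
    {
            if (old_state->crtc != new_state->crtc)
                    return false;

            /* Position, size and scaling. */
            if (old_state->src_w != new_state->src_w ||
                old_state->src_h != new_state->src_h ||
                old_state->crtc_w != new_state->crtc_w ||
                old_state->crtc_h != new_state->crtc_h)
                    return false;

            /* Rotation, blending and alpha. */
            if (old_state->rotation != new_state->rotation ||
                old_state->pixel_blend_mode != new_state->pixel_blend_mode ||
                old_state->alpha != new_state->alpha)
                    return false;

            /* Pixel format changes need full revalidation. */
            if (old_state->fb && new_state->fb &&
                old_state->fb->format != new_state->fb->format)
                    return false;

            return true;
    }

Anything that fails these checks would fall through to full pipe programming and
DC global validation, as described in the commit message.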





Optimization can come later so we don't reset DC planes at all for
MEDIUM updates and avoid validation, but we might require some extra
checks in DM to achieve this.

Cc: Bhawanpreet Lakha 
Cc: Hersen Wu 
Signed-off-by: Nicholas Kazlauskas 
---
  .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 25 +++
  1 file changed, 25 insertions(+)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 0d5f45742bb5..2cbb29199e61 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -8336,6 +8336,31 @@ static bool should_reset_plane(struct drm_atomic_state 
*state,
if (old_other_state->crtc != new_other_state->crtc)
return true;
  
+		/* Src/dst size and scaling updates. */

+   if (old_other_state->src_w != new_other_state->src_w ||
+   old_other_state->src_h != new_other_state->src_h ||
+   old_other_state->crtc_w != new_other_state->crtc_w ||
+   old_other_state->crtc_h != new_other_state->crtc_h)
+   return true;
+
+   /* Rotation / mirroring updates. */
+   if (old_other_state->rotation != new_other_state->rotation)
+   return true;
+
+   /* Blending updates. */
+   if (old_other_state->pixel_blend_mode !=
+   new_other_state->pixel_blend_mode)
+   return true;
+
+   /* Alpha updates. */
+   if (old_other_state->alpha != new_other_state->alpha)
+   return true;
+
+   /* Colorspace changes. */
+   if (old_other_state->color_range != 
new_other_state->color_range ||
+   old_other_state->color_encoding != 
new_other_state->color_encoding)
+   return true;
+
/* Framebuffer checks fall at the end. */
if (!old_other_state->fb || !new_other_state->fb)
continue;
--
2.25.1






Re: [PATCH 3/7] drm/amd/display: Avoid using unvalidated tiling_flags and tmz_surface in prepare_planes

2020-08-07 Thread Kazlauskas, Nicholas

On 2020-08-07 4:30 a.m., dan...@ffwll.ch wrote:

On Thu, Jul 30, 2020 at 04:36:38PM -0400, Nicholas Kazlauskas wrote:

[Why]
We're racing with userspace as the flags could potentially change
from when we acquired and validated them in commit_check.


Uh ... I didn't know these could change. I think my comments on Bas'
series are even more relevant now. I think long term it would be best to bake
these flags in at addfb time when modifiers aren't set. And otherwise
always use the modifiers flag, and completely ignore the legacy flags
here.
-Daniel



There's a set tiling/mod flags IOCTL that can be called after addfb 
happens, so unless there's some sort of driver magic preventing this 
from working when it's already been pinned for scanout then I don't see 
anything stopping this from happening.


I still need to review the modifiers series in a little more detail but 
that looks like a good approach to fixing these kind of issues.


Regards,
Nicholas Kazlauskas



[How]
We unfortunately can't drop this function in its entirety from
prepare_planes since we don't know the afb->address at commit_check
time yet.

So instead of querying new tiling_flags and tmz_surface use the ones
from the plane_state directly.

While we're at it, also update the force_disable_dcc option based
on the state from atomic check.

Cc: Bhawanpreet Lakha 
Cc: Rodrigo Siqueira 
Signed-off-by: Nicholas Kazlauskas 
---
  .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 36 ++-
  1 file changed, 19 insertions(+), 17 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index bf1881bd492c..f78c09c9585e 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -5794,14 +5794,8 @@ static int dm_plane_helper_prepare_fb(struct drm_plane 
*plane,
struct list_head list;
struct ttm_validate_buffer tv;
struct ww_acquire_ctx ticket;
-   uint64_t tiling_flags;
uint32_t domain;
int r;
-   bool tmz_surface = false;
-   bool force_disable_dcc = false;
-
-   dm_plane_state_old = to_dm_plane_state(plane->state);
-   dm_plane_state_new = to_dm_plane_state(new_state);
  
  	if (!new_state->fb) {

DRM_DEBUG_DRIVER("No FB bound\n");
@@ -5845,27 +5839,35 @@ static int dm_plane_helper_prepare_fb(struct drm_plane 
*plane,
return r;
}
  
-	amdgpu_bo_get_tiling_flags(rbo, &tiling_flags);

-
-   tmz_surface = amdgpu_bo_encrypted(rbo);
-
ttm_eu_backoff_reservation(&ticket, &list);
  
  	afb->address = amdgpu_bo_gpu_offset(rbo);
  
  	amdgpu_bo_ref(rbo);
  
+	/**

+* We don't do surface updates on planes that have been newly created,
+* but we also don't have the afb->address during atomic check.
+*
+* Fill in buffer attributes depending on the address here, but only on
+* newly created planes since they're not being used by DC yet and this
+* won't modify global state.
+*/
+   dm_plane_state_old = to_dm_plane_state(plane->state);
+   dm_plane_state_new = to_dm_plane_state(new_state);
+
if (dm_plane_state_new->dc_state &&
-   dm_plane_state_old->dc_state != 
dm_plane_state_new->dc_state) {
-   struct dc_plane_state *plane_state = 
dm_plane_state_new->dc_state;
+   dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
+   struct dc_plane_state *plane_state =
+   dm_plane_state_new->dc_state;
+   bool force_disable_dcc = !plane_state->dcc.enable;
  
-		force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;

fill_plane_buffer_attributes(
adev, afb, plane_state->format, plane_state->rotation,
-   tiling_flags, &plane_state->tiling_info,
-   &plane_state->plane_size, &plane_state->dcc,
-   &plane_state->address, tmz_surface,
-   force_disable_dcc);
+   dm_plane_state_new->tiling_flags,
+   &plane_state->tiling_info, &plane_state->plane_size,
+   &plane_state->dcc, &plane_state->address,
+   dm_plane_state_new->tmz_surface, force_disable_dcc);
}
  
  	return 0;

--
2.25.1







Re: [PATCH 7/7] drm/amd/display: Replace DRM private objects with subclassed DRM atomic state

2020-08-07 Thread Kazlauskas, Nicholas

On 2020-08-07 4:52 a.m., dan...@ffwll.ch wrote:

On Thu, Jul 30, 2020 at 04:36:42PM -0400, Nicholas Kazlauskas wrote:

@@ -440,7 +431,7 @@ struct dm_crtc_state {
  #define to_dm_crtc_state(x) container_of(x, struct dm_crtc_state, base)
  
  struct dm_atomic_state {

-   struct drm_private_state base;
+   struct drm_atomic_state base;
  
  	struct dc_state *context;


Also curiosity: Can't we just embed dc_state here, instead of a pointer?
Then it would become a lot more obvious that mostly this is a state object
container like drm_atomic_state, but for the DC specific state structures.
And we could look into moving the actual DC states into drm private states
perhaps (if that helps with the code structure and overall flow).

Maybe as next steps.
-Daniel



It's the refcounting that's the problem with this stuff. I'd like to 
move DC to a model where we have no memory allocation/ownership but that 
might be a bit of a more long term plan at this point.


Same with dc_plane_state and dc_stream_state as well - these could exist 
on the DRM objects as long as they're not refcounted.


Regards,
Nicholas Kazlauskas


Re: [PATCH v2] drm/amd/display: use correct scale for actual_brightness

2020-08-18 Thread Kazlauskas, Nicholas

No objections from my side - and thanks for addressing my feedback.

Regards,
Nicholas Kazlauskas

On 2020-08-18 12:15 p.m., Alex Deucher wrote:

Applied.  Thanks!

Alex

On Mon, Aug 17, 2020 at 1:59 PM Alex Deucher  wrote:


On Mon, Aug 17, 2020 at 3:09 AM Alexander Monakov  wrote:


Ping.


Patch looks good to me:
Reviewed-by: Alex Deucher 

Nick, unless you have any objections, I'll go ahead and apply it.

Alex



On Tue, 4 Aug 2020, Alexander Monakov wrote:


Documentation for sysfs backlight level interface requires that
values in both 'brightness' and 'actual_brightness' files are
interpreted to be in range from 0 to the value given in the
'max_brightness' file.

With amdgpu, max_brightness gives 255, and values written by the user
into 'brightness' are internally rescaled to a wider range. However,
reading from 'actual_brightness' gives the raw register value without
inverse rescaling. This causes issues for various userspace tools such
as PowerTop and systemd that expect the value to be in the correct
range.

Introduce a helper to retrieve internal backlight range. Use it to
reimplement 'convert_brightness' as 'convert_brightness_from_user' and
introduce 'convert_brightness_to_user'.

Bug: https://bugzilla.kernel.org/show_bug.cgi?id=203905
Bug: https://gitlab.freedesktop.org/drm/amd/-/issues/1242
Cc: Alex Deucher 
Cc: Nicholas Kazlauskas 
Signed-off-by: Alexander Monakov 
---
v2: split convert_brightness to &_from_user and &_to_user (Nicholas)

  .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 81 +--
  1 file changed, 40 insertions(+), 41 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 710edc70e37e..b60a763f3f95 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -2881,51 +2881,50 @@ static int set_backlight_via_aux(struct dc_link *link, 
uint32_t brightness)
   return rc ? 0 : 1;
  }

-static u32 convert_brightness(const struct amdgpu_dm_backlight_caps *caps,
-   const uint32_t user_brightness)
+static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
+ unsigned *min, unsigned *max)
  {
- u32 min, max, conversion_pace;
- u32 brightness = user_brightness;
-
   if (!caps)
- goto out;
+ return 0;

- if (!caps->aux_support) {
- max = caps->max_input_signal;
- min = caps->min_input_signal;
- /*
-  * The brightness input is in the range 0-255
-  * It needs to be rescaled to be between the
-  * requested min and max input signal
-  * It also needs to be scaled up by 0x101 to
-  * match the DC interface which has a range of
-  * 0 to 0xFFFF
-  */
- conversion_pace = 0x101;
- brightness =
- user_brightness
- * conversion_pace
- * (max - min)
- / AMDGPU_MAX_BL_LEVEL
- + min * conversion_pace;
+ if (caps->aux_support) {
+ // Firmware limits are in nits, DC API wants millinits.
+ *max = 1000 * caps->aux_max_input_signal;
+ *min = 1000 * caps->aux_min_input_signal;
   } else {
- /* TODO
-  * We are doing a linear interpolation here, which is OK but
-  * does not provide the optimal result. We probably want
-  * something close to the Perceptual Quantizer (PQ) curve.
-  */
- max = caps->aux_max_input_signal;
- min = caps->aux_min_input_signal;
-
- brightness = (AMDGPU_MAX_BL_LEVEL - user_brightness) * min
-+ user_brightness * max;
- // Multiple the value by 1000 since we use millinits
- brightness *= 1000;
- brightness = DIV_ROUND_CLOSEST(brightness, AMDGPU_MAX_BL_LEVEL);
+ // Firmware limits are 8-bit, PWM control is 16-bit.
+ *max = 0x101 * caps->max_input_signal;
+ *min = 0x101 * caps->min_input_signal;
   }
+ return 1;
+}

-out:
- return brightness;
+static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps 
*caps,
+ uint32_t brightness)
+{
+ unsigned min, max;
+
+ if (!get_brightness_range(caps, &min, &max))
+ return brightness;
+
+ // Rescale 0..255 to min..max
+ return min + DIV_ROUND_CLOSEST((max - min) * brightness,
+AMDGPU_MAX_BL_LEVEL);
+}
+
+static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps 
*caps,
+   uint32_t brightness)
+{
+ unsigned min, max;
+
+ if (!get_brightness_range(caps, &min, &max))
+ return brightness;
+
+ if (bright

Re: [PATCH] drm/amdgpu/dc: Require primary plane to be enabled whenever the CRTC is

2020-08-21 Thread Kazlauskas, Nicholas

On 2020-08-21 12:57 p.m., Michel Dänzer wrote:

From: Michel Dänzer 

Don't check drm_crtc_state::active for this either, per its
documentation in include/drm/drm_crtc.h:

  * Hence drivers must not consult @active in their various
  * &drm_mode_config_funcs.atomic_check callback to reject an atomic
  * commit.

The atomic helpers disable the CRTC as needed for disabling the primary
plane.

This prevents at least the following problems if the primary plane gets
disabled (e.g. due to destroying the FB assigned to the primary plane,
as happens e.g. with mutter in Wayland mode):

* Toggling CRTC active to 1 failed if the cursor plane was enabled
   (e.g. via legacy DPMS property & cursor ioctl).
* Enabling the cursor plane failed, e.g. via the legacy cursor ioctl.


We previously had the requirement that the primary plane must be enabled 
but some userspace expects that they can enable just the overlay plane 
without anything else.


I think the chromiumos atomictest validates that this works as well.

So is DRM going forward then with the expectation that this is wrong 
behavior from userspace?


We require at least one plane to be enabled to display a cursor, but it 
doesn't necessarily need to be the primary.


Regards,
Nicholas Kazlauskas



GitLab: https://gitlab.gnome.org/GNOME/mutter/-/issues/1108
GitLab: https://gitlab.gnome.org/GNOME/mutter/-/issues/1165
GitLab: https://gitlab.gnome.org/GNOME/mutter/-/issues/1344
Suggested-by: Daniel Vetter 
Signed-off-by: Michel Dänzer 
---

Note that this will cause some IGT tests to fail without
https://patchwork.freedesktop.org/series/80904/ .

  .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 33 +++
  1 file changed, 11 insertions(+), 22 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 897d60ade1e4..33c5739e221b 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -5262,19 +5262,6 @@ static void dm_crtc_helper_disable(struct drm_crtc *crtc)
  {
  }
  
-static bool does_crtc_have_active_cursor(struct drm_crtc_state *new_crtc_state)

-{
-   struct drm_device *dev = new_crtc_state->crtc->dev;
-   struct drm_plane *plane;
-
-   drm_for_each_plane_mask(plane, dev, new_crtc_state->plane_mask) {
-   if (plane->type == DRM_PLANE_TYPE_CURSOR)
-   return true;
-   }
-
-   return false;
-}
-
  static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
  {
struct drm_atomic_state *state = new_crtc_state->state;
@@ -5338,19 +5325,21 @@ static int dm_crtc_helper_atomic_check(struct drm_crtc 
*crtc,
return ret;
}
  
-	/* In some use cases, like reset, no stream is attached */

-   if (!dm_crtc_state->stream)
-   return 0;
-
/*
-* We want at least one hardware plane enabled to use
-* the stream with a cursor enabled.
+* We require the primary plane to be enabled whenever the CRTC is,
+* otherwise the legacy cursor ioctl helper may end up trying to enable
+* the cursor plane while the primary plane is disabled, which is not
+* supported by the hardware. And there is legacy userspace which stops
+* using the HW cursor altogether in response to the resulting EINVAL.
 */
-   if (state->enable && state->active &&
-   does_crtc_have_active_cursor(state) &&
-   dm_crtc_state->active_planes == 0)
+   if (state->enable &&
+   !(state->plane_mask & drm_plane_mask(crtc->primary)))
return -EINVAL;
  
+	/* In some use cases, like reset, no stream is attached */

+   if (!dm_crtc_state->stream)
+   return 0;
+
if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
return 0;
  





Re: [PATCH] drm/amd/display: Add DPCS regs for dcn3 link encoder

2020-08-24 Thread Kazlauskas, Nicholas

On 2020-08-24 11:11 a.m., Bhawanpreet Lakha wrote:

dpcs reg are missing for dcn3 link encoder regs list, so add them.

Also remove
DPCSTX_DEBUG_CONFIG and RDPCSTX_DEBUG_CONFIG as they are unused and
cause compile errors for dcn3

Signed-off-by: Bhawanpreet Lakha 


Reviewed-by: Nicholas Kazlauskas 

Regards,
Nicholas Kazlauskas


---
  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h | 2 --
  drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c | 1 +
  2 files changed, 1 insertion(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h 
b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h
index dcbf28dd72d4..864acd695cbb 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h
@@ -231,8 +231,6 @@
SRI(RDPCSTX_PHY_FUSE3, RDPCSTX, id), \
SRI(DPCSTX_TX_CLOCK_CNTL, DPCSTX, id), \
SRI(DPCSTX_TX_CNTL, DPCSTX, id), \
-   SRI(DPCSTX_DEBUG_CONFIG, DPCSTX, id), \
-   SRI(RDPCSTX_DEBUG_CONFIG, RDPCSTX, id), \
SR(RDPCSTX0_RDPCSTX_SCRATCH)
  
  
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c

index 957fc37b971e..8be4f21169d0 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
@@ -491,6 +491,7 @@ static const struct dcn10_link_enc_hpd_registers 
link_enc_hpd_regs[] = {
  [id] = {\
LE_DCN3_REG_LIST(id), \
UNIPHY_DCN2_REG_LIST(phyid), \
+   DPCS_DCN2_REG_LIST(id), \
SRI(DP_DPHY_INTERNAL_CTRL, DP, id) \
  }
  





Re: [PATCH] drm/amdgpu/dc: Require primary plane to be enabled whenever the CRTC is

2020-08-25 Thread Kazlauskas, Nicholas

On 2020-08-22 5:59 a.m., Michel Dänzer wrote:

On 2020-08-21 8:07 p.m., Kazlauskas, Nicholas wrote:

On 2020-08-21 12:57 p.m., Michel Dänzer wrote:

From: Michel Dänzer 

Don't check drm_crtc_state::active for this either, per its
documentation in include/drm/drm_crtc.h:

   * Hence drivers must not consult @active in their various
   * &drm_mode_config_funcs.atomic_check callback to reject an atomic
   * commit.

The atomic helpers disable the CRTC as needed for disabling the primary
plane.

This prevents at least the following problems if the primary plane gets
disabled (e.g. due to destroying the FB assigned to the primary plane,
as happens e.g. with mutter in Wayland mode):

* Toggling CRTC active to 1 failed if the cursor plane was enabled
    (e.g. via legacy DPMS property & cursor ioctl).
* Enabling the cursor plane failed, e.g. via the legacy cursor ioctl.


We previously had the requirement that the primary plane must be enabled
but some userspace expects that they can enable just the overlay plane
without anything else.

I think the chromiumos atomictest validates that this works as well.

So is DRM going forward then with the expectation that this is wrong
behavior from userspace?

We require at least one plane to be enabled to display a cursor, but it
doesn't necessarily need to be the primary.


It's a "pick your poison" situation:

1) Currently the checks are invalid (atomic_check must not decide based
on drm_crtc_state::active), and it's easy for legacy KMS userspace to
accidentally hit errors trying to enable/move the cursor or switch DPMS
off → on.

2) Accurately rejecting only atomic states where the cursor plane is
enabled but all other planes are off would break the KMS helper code,
which can only deal with the "CRTC on & primary plane off is not
allowed" case specifically.

3) This patch addresses 1) & 2) but may break existing atomic userspace
which wants to enable an overlay plane while disabling the primary plane.


I do think in principle atomic userspace is expected to handle case 3)
and leave the primary plane enabled. However, this is not ideal from an
energy consumption PoV. Therefore, here's another idea for a possible
way out of this quagmire:

amdgpu_dm does not reject any atomic states based on which planes are
enabled in it. If the cursor plane is enabled but all other planes are
off, amdgpu_dm internally either:

a) Enables an overlay plane and makes it invisible, e.g. by assigning a
minimum size FB with alpha = 0.

b) Enables the primary plane and assigns a minimum size FB (scaled up to
the required size) containing all black, possibly using compression.
(Trying to minimize the memory bandwidth)


Does either of these seem feasible? If both do, which one would be
preferable?




It's really the same solution since DCN doesn't make a distinction 
between primary or overlay planes in hardware. DCE doesn't have overlay 
planes enabled so this is not relevant there.


The old behavior (pre 5.1?) was to silently accept the commit even 
though the screen would be completely black instead of outright 
rejecting the commit.


I almost wonder if that makes more sense in the short term here since 
the only "userspace" affected here is IGT. We'll fail the CRC checks, 
but no userspace actually tries to actively use a cursor with no primary 
plane enabled from my understanding.


In the long term I think we can work on getting cursor actually on the 
screen in this case, though I can't say I really like having to reserve 
some small buffer (eg. 16x16) for allowing lightup on this corner case.


Regards,
Nicholas Kazlauskas


Re: [PATCH 1/3] drm/amdgpu: Move existing pflip fields into separate struct

2020-09-09 Thread Kazlauskas, Nicholas

On 2020-09-09 10:28 a.m., Aurabindo Pillai wrote:

[Why&How]
To refactor DM IRQ management, all fields used by the IRQ handlers are best moved
to a separate struct so that the main amdgpu_crtc struct need not be changed.
The location of the new struct shall be in DM.

Signed-off-by: Aurabindo Pillai 


Reviewed-by: Nicholas Kazlauskas 

Regards,
Nicholas Kazlauskas


---
  drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h  |  4 ++-
  .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c |  4 +--
  .../display/amdgpu_dm/amdgpu_dm_irq_params.h  | 33 +++
  3 files changed, 38 insertions(+), 3 deletions(-)
  create mode 100644 
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq_params.h

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
index aa3e22be4f2d..345cb0464370 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
@@ -46,6 +46,7 @@
  
  #include 

  #include "modules/inc/mod_freesync.h"
+#include "amdgpu_dm_irq_params.h"
  
  struct amdgpu_bo;

  struct amdgpu_device;
@@ -410,7 +411,8 @@ struct amdgpu_crtc {
struct amdgpu_flip_work *pflip_works;
enum amdgpu_flip_status pflip_status;
int deferred_flip_completion;
-   u32 last_flip_vblank;
+   /* parameters access from DM IRQ handler */
+   struct dm_irq_params dm_irq_params;
/* pll sharing */
struct amdgpu_atom_ss ss;
bool ss_enabled;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index cb624ee70545..40814cdd8c92 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -389,7 +389,7 @@ static void dm_pflip_high_irq(void *interrupt_params)
 * of pageflip completion, so last_flip_vblank is the forbidden count
 * for queueing new pageflips if vsync + VRR is enabled.
 */
-   amdgpu_crtc->last_flip_vblank =
+   amdgpu_crtc->dm_irq_params.last_flip_vblank =
amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);
  
  	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;

@@ -7248,7 +7248,7 @@ static void amdgpu_dm_commit_planes(struct 
drm_atomic_state *state,
 * on late submission of flips.
 */
spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
-   last_flip_vblank = acrtc_attach->last_flip_vblank;
+   last_flip_vblank = 
acrtc_attach->dm_irq_params.last_flip_vblank;
spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
}
  
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq_params.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq_params.h

new file mode 100644
index ..55ef237eed8b
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq_params.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __AMDGPU_DM_IRQ_PARAMS_H__
+#define __AMDGPU_DM_IRQ_PARAMS_H__
+
+struct dm_irq_params {
+   u32 last_flip_vblank;
+};
+
+#endif /* __AMDGPU_DM_IRQ_PARAMS_H__ */





Re: [PATCH 2/3] drm/amd/display: Refactor to prevent crtc state access in DM IRQ handler

2020-09-09 Thread Kazlauskas, Nicholas

On 2020-09-09 10:28 a.m., Aurabindo Pillai wrote:

[Why&How]
Currently commit_tail holds global locks and waits for dependencies, which is
against the DRM API contracts. In order to fix this, the IRQ handlers should be able
to run without having to access crtc state. The required parameters are copied over
so that they can be accessed directly from the interrupt handlers.

Signed-off-by: Aurabindo Pillai 
---
  .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 115 ++
  .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h |   1 -
  .../display/amdgpu_dm/amdgpu_dm_irq_params.h  |   4 +
  3 files changed, 68 insertions(+), 52 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 40814cdd8c92..0603436a3313 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -192,17 +192,14 @@ static u32 dm_vblank_get_counter(struct amdgpu_device 
*adev, int crtc)
return 0;
else {
struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
-   struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
-   acrtc->base.state);
  
-

-   if (acrtc_state->stream == NULL) {
+   if (acrtc->dm_irq_params.stream == NULL) {
DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
  crtc);
return 0;
}
  
-		return dc_stream_get_vblank_counter(acrtc_state->stream);

+   return 
dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
}
  }
  
@@ -215,10 +212,8 @@ static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,

return -EINVAL;
else {
struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
-   struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
-   acrtc->base.state);
  
-		if (acrtc_state->stream ==  NULL) {

+   if (acrtc->dm_irq_params.stream ==  NULL) {
DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
  crtc);
return 0;
@@ -228,7 +223,7 @@ static int dm_crtc_get_scanoutpos(struct amdgpu_device 
*adev, int crtc,
 * TODO rework base driver to use values directly.
 * for now parse it back into reg-format
 */
-   dc_stream_get_scanoutpos(acrtc_state->stream,
+   dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
 &v_blank_start,
 &v_blank_end,
 &h_position,
@@ -287,6 +282,14 @@ get_crtc_by_otg_inst(struct amdgpu_device *adev,
return NULL;
  }
  
+static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)

+{
+   return acrtc->dm_irq_params.freesync_config.state ==
+  VRR_STATE_ACTIVE_VARIABLE ||
+  acrtc->dm_irq_params.freesync_config.state ==
+  VRR_STATE_ACTIVE_FIXED;
+}
+
  static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
  {
return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
@@ -307,7 +310,6 @@ static void dm_pflip_high_irq(void *interrupt_params)
struct amdgpu_device *adev = irq_params->adev;
unsigned long flags;
struct drm_pending_vblank_event *e;
-   struct dm_crtc_state *acrtc_state;
uint32_t vpos, hpos, v_blank_start, v_blank_end;
bool vrr_active;
  
@@ -339,12 +341,11 @@ static void dm_pflip_high_irq(void *interrupt_params)

if (!e)
WARN_ON(1);
  
-	acrtc_state = to_dm_crtc_state(amdgpu_crtc->base.state);

-   vrr_active = amdgpu_dm_vrr_active(acrtc_state);
+   vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);
  
  	/* Fixed refresh rate, or VRR scanout position outside front-porch? */

if (!vrr_active ||
-   !dc_stream_get_scanoutpos(acrtc_state->stream, &v_blank_start,
+   !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, 
&v_blank_start,
  &v_blank_end, &hpos, &vpos) ||
(vpos < v_blank_start)) {
/* Update to correct count and vblank timestamp if racing with
@@ -405,17 +406,17 @@ static void dm_vupdate_high_irq(void *interrupt_params)
struct common_irq_params *irq_params = interrupt_params;
struct amdgpu_device *adev = irq_params->adev;
struct amdgpu_crtc *acrtc;
-   struct dm_crtc_state *acrtc_state;
unsigned long flags;
+   int vrr_active;
  
  	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);
  
  	if (acrtc) {

-   acrtc_state = to_dm_crtc_state(acrtc->base.state);
+   vrr_active

Re: [PATCH 3/3] drm/amd/display: Move disable interrupt into commit tail

2020-09-09 Thread Kazlauskas, Nicholas

On 2020-09-09 10:28 a.m., Aurabindo Pillai wrote:

[Why&How]
Since there is no need for accessing crtc state in the interrupt
handler, interrupts need not be disabled well in advance, and
can be moved to commit_tail where it should be.

Signed-off-by: Aurabindo Pillai 


This patch is fine in idea, but it'll need the reference counting fixed 
in patch #2 first and be adjusted after.


Regards,
Nicholas Kazlauskas


---
  .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 38 +--
  1 file changed, 10 insertions(+), 28 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 0603436a3313..a40de242e04a 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -7488,34 +7488,6 @@ static int amdgpu_dm_atomic_commit(struct drm_device 
*dev,
   struct drm_atomic_state *state,
   bool nonblock)
  {
-   struct drm_crtc *crtc;
-   struct drm_crtc_state *old_crtc_state, *new_crtc_state;
-   struct amdgpu_device *adev = drm_to_adev(dev);
-   int i;
-
-   /*
-* We evade vblank and pflip interrupts on CRTCs that are undergoing
-* a modeset, being disabled, or have no active planes.
-*
-* It's done in atomic commit rather than commit tail for now since
-* some of these interrupt handlers access the current CRTC state and
-* potentially the stream pointer itself.
-*
-* Since the atomic state is swapped within atomic commit and not within
-* commit tail this would leave to new state (that hasn't been 
committed yet)
-* being accesssed from within the handlers.
-*
-* TODO: Fix this so we can do this in commit tail and not have to block
-* in atomic check.
-*/
-   for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, 
new_crtc_state, i) {
-   struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
-
-   if (old_crtc_state->active &&
-   (!new_crtc_state->active ||
-drm_atomic_crtc_needs_modeset(new_crtc_state)))
-   manage_dm_interrupts(adev, acrtc, false);
-   }
/*
 * Add check here for SoC's that support hardware cursor plane, to
 * unset legacy_cursor_update
@@ -7566,6 +7538,16 @@ static void amdgpu_dm_atomic_commit_tail(struct 
drm_atomic_state *state)
dc_resource_state_copy_construct_current(dm->dc, dc_state);
}
  
+	for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state,

+  new_crtc_state, i) {
+   acrtc = to_amdgpu_crtc(crtc);
+
+   if (old_crtc_state->active &&
+   (!new_crtc_state->active ||
+drm_atomic_crtc_needs_modeset(new_crtc_state)))
+   manage_dm_interrupts(adev, acrtc, false);
+   }
+
/* update changed items */
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, 
new_crtc_state, i) {
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);





Re: [PATCH v2 3/3] drm/amd/display: Move disable interrupt into commit tail

2020-09-11 Thread Kazlauskas, Nicholas

On 2020-09-11 12:27 p.m., Aurabindo Pillai wrote:

[Why&How]
Since there is no need to access crtc state in the interrupt
handler, interrupts need not be disabled well in advance, and
the disabling can be moved to commit_tail where it should be.

Signed-off-by: Aurabindo Pillai 
---
  .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 41 ++-
  1 file changed, 13 insertions(+), 28 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 1455acf5beca..b5af1f692499 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -7488,34 +7488,6 @@ static int amdgpu_dm_atomic_commit(struct drm_device 
*dev,
   struct drm_atomic_state *state,
   bool nonblock)
  {
-   struct drm_crtc *crtc;
-   struct drm_crtc_state *old_crtc_state, *new_crtc_state;
-   struct amdgpu_device *adev = drm_to_adev(dev);
-   int i;
-
-   /*
-* We evade vblank and pflip interrupts on CRTCs that are undergoing
-* a modeset, being disabled, or have no active planes.
-*
-* It's done in atomic commit rather than commit tail for now since
-* some of these interrupt handlers access the current CRTC state and
-* potentially the stream pointer itself.
-*
-* Since the atomic state is swapped within atomic commit and not within
-* commit tail this would leave to new state (that hasn't been 
committed yet)
-* being accesssed from within the handlers.
-*
-* TODO: Fix this so we can do this in commit tail and not have to block
-* in atomic check.
-*/
-   for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, 
new_crtc_state, i) {
-   struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
-
-   if (old_crtc_state->active &&
-   (!new_crtc_state->active ||
-drm_atomic_crtc_needs_modeset(new_crtc_state)))
-   manage_dm_interrupts(adev, acrtc, false);
-   }
/*
 * Add check here for SoC's that support hardware cursor plane, to
 * unset legacy_cursor_update
@@ -7566,6 +7538,19 @@ static void amdgpu_dm_atomic_commit_tail(struct 
drm_atomic_state *state)
dc_resource_state_copy_construct_current(dm->dc, dc_state);
}
  
+	for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state,

+  new_crtc_state, i) {
+   acrtc = to_amdgpu_crtc(crtc);
+   dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
+
+   if (old_crtc_state->active &&
+   (!new_crtc_state->active ||
+drm_atomic_crtc_needs_modeset(new_crtc_state))) {
+   manage_dm_interrupts(adev, acrtc, false);
+   dc_stream_release(dm_old_crtc_state->stream);


Please include this dc_stream_release() in patch #2 as well to prevent 
memory leaks during bisections.
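For reference, a minimal sketch of the kind of hunk I'd expect in patch #2 (the surrounding context there is hypothetical and may differ):

	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
				      new_crtc_state, i) {
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
		struct dm_crtc_state *dm_old_crtc_state =
			to_dm_crtc_state(old_crtc_state);

		if (old_crtc_state->active &&
		    (!new_crtc_state->active ||
		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
			manage_dm_interrupts(adev, acrtc, false);
			/* Drop the old stream reference here as well so the
			 * intermediate patch in the series doesn't leak it. */
			dc_stream_release(dm_old_crtc_state->stream);
		}
	}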


With that change, this series is Reviewed-by: Nicholas Kazlauskas 



Regards,
Nicholas Kazlauskas


+   }
+   }
+
/* update changed items */
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, 
new_crtc_state, i) {
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);





Re: [PATCH v2 3/4] drm/amd/display: Add pipe_state tracepoint

2020-09-11 Thread Kazlauskas, Nicholas

On 2020-09-11 10:59 a.m., Rodrigo Siqueira wrote:

This commit introduces a trace mechanism for struct pipe_ctx by adding a
middle layer struct in the amdgpu_dm_trace.h for capturing the most
important data from struct pipe_ctx and showing its data via tracepoint.
This tracepoint was added to dc.c and dcn10_hw_sequencer, however, it
can be added to other DCN architecture.

Co-developed-by: Nicholas Kazlauskas 
Signed-off-by: Nicholas Kazlauskas 
Signed-off-by: Rodrigo Siqueira 



This patch, while very useful, unfortunately pulls a lot of DM code 
into DC, so I would prefer to hold off on this one for now.


It would be better if this had a proper DC interface for tracing/logging 
these states. If the API were more like how we handle tracing register 
reads/writes, this would be cleaner.
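Something along these lines, as a rough sketch only (the names below are illustrative and not an existing DC API):

	/* DM registers a callback at dc_create() time; DC never sees
	 * ftrace or any other DM-specific tracing machinery. */
	struct dc_trace_funcs {
		void (*trace_pipe_state)(struct dc_context *ctx,
					 const struct pipe_ctx *pipe_ctx);
	};

	/* Called from DC wherever a pipe is programmed: */
	static inline void dc_trace_pipe_state(struct dc_context *ctx,
					       const struct pipe_ctx *pipe_ctx)
	{
		if (ctx->trace_funcs && ctx->trace_funcs->trace_pipe_state)
			ctx->trace_funcs->trace_pipe_state(ctx, pipe_ctx);
	}

That keeps the struct pipe_ctx knowledge in DC and leaves the tracepoint definition (and the field copying) on the DM side.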


Regards,
Nicholas Kazlauskas


---
  .../amd/display/amdgpu_dm/amdgpu_dm_trace.h   | 172 ++
  drivers/gpu/drm/amd/display/dc/core/dc.c  |  11 ++
  .../amd/display/dc/dcn10/dcn10_hw_sequencer.c |  17 +-
  3 files changed, 195 insertions(+), 5 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_trace.h 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_trace.h
index 5fb4c4a5c349..53f62506e17c 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_trace.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_trace.h
@@ -376,6 +376,178 @@ TRACE_EVENT(amdgpu_dm_atomic_check_finish,
  __entry->async_update, __entry->allow_modeset)
  );
  
+#ifndef _AMDGPU_DM_TRACE_STRUCTS_DEFINED_

+#define _AMDGPU_DM_TRACE_STRUCTS_DEFINED_
+
+struct amdgpu_dm_trace_pipe_state {
+   int pipe_idx;
+   const void *stream;
+   int stream_w;
+   int stream_h;
+   int dst_x;
+   int dst_y;
+   int dst_w;
+   int dst_h;
+   int src_x;
+   int src_y;
+   int src_w;
+   int src_h;
+   int clip_x;
+   int clip_y;
+   int clip_w;
+   int clip_h;
+   int recout_x;
+   int recout_y;
+   int recout_w;
+   int recout_h;
+   int viewport_x;
+   int viewport_y;
+   int viewport_w;
+   int viewport_h;
+   int flip_immediate;
+   int surface_pitch;
+   int format;
+   int swizzle;
+   unsigned int update_flags;
+};
+
+#define fill_out_trace_pipe_state(trace_pipe_state, pipe_ctx) \
+   do { \
+   trace_pipe_state.pipe_idx   = (pipe_ctx)->pipe_idx; \
+   trace_pipe_state.stream = (pipe_ctx)->stream; \
+   trace_pipe_state.stream_w   = 
(pipe_ctx)->stream->timing.h_addressable; \
+   trace_pipe_state.stream_h   = 
(pipe_ctx)->stream->timing.v_addressable; \
+   trace_pipe_state.dst_x  = 
(pipe_ctx)->plane_state->dst_rect.x; \
+   trace_pipe_state.dst_y  = 
(pipe_ctx)->plane_state->dst_rect.y; \
+   trace_pipe_state.dst_w  = 
(pipe_ctx)->plane_state->dst_rect.width; \
+   trace_pipe_state.dst_h  = 
(pipe_ctx)->plane_state->dst_rect.height; \
+   trace_pipe_state.src_x  = 
(pipe_ctx)->plane_state->src_rect.x; \
+   trace_pipe_state.src_y  = 
(pipe_ctx)->plane_state->src_rect.y; \
+   trace_pipe_state.src_w  = 
(pipe_ctx)->plane_state->src_rect.width; \
+   trace_pipe_state.src_h  = 
(pipe_ctx)->plane_state->src_rect.height; \
+   trace_pipe_state.clip_x = 
(pipe_ctx)->plane_state->clip_rect.x; \
+   trace_pipe_state.clip_y = 
(pipe_ctx)->plane_state->clip_rect.y; \
+   trace_pipe_state.clip_w = 
(pipe_ctx)->plane_state->clip_rect.width; \
+   trace_pipe_state.clip_h = 
(pipe_ctx)->plane_state->clip_rect.height; \
+   trace_pipe_state.recout_x   = 
(pipe_ctx)->plane_res.scl_data.recout.x; \
+   trace_pipe_state.recout_y   = 
(pipe_ctx)->plane_res.scl_data.recout.y; \
+   trace_pipe_state.recout_w   = 
(pipe_ctx)->plane_res.scl_data.recout.width; \
+   trace_pipe_state.recout_h   = 
(pipe_ctx)->plane_res.scl_data.recout.height; \
+   trace_pipe_state.viewport_x = 
(pipe_ctx)->plane_res.scl_data.viewport.x; \
+   trace_pipe_state.viewport_y = 
(pipe_ctx)->plane_res.scl_data.viewport.y; \
+   trace_pipe_state.viewport_w = 
(pipe_ctx)->plane_res.scl_data.viewport.width; \
+   trace_pipe_state.viewport_h = 
(pipe_ctx)->plane_res.scl_data.viewport.height; \
+   trace_pipe_state.flip_immediate = 
(pipe_ctx)->plane_state->flip_immediate; \
+   trace_pipe_state.surface_pitch  = 
(pipe_ctx)->plane_state->plane_size.surface_pitch; \
+   trace_pipe_state.format = 
(pipe_ctx)->plane_state->format; \
+   trace_pipe_state.swizzle= 
(pipe_ctx)->plane_state->tiling_info.gfx9.swizzle; \
+   trac

Re: [PATCH v2 1/4] drm/amd/display: Rework registers tracepoint

2020-09-11 Thread Kazlauskas, Nicholas

On 2020-09-11 10:59 a.m., Rodrigo Siqueira wrote:

amdgpu_dc_rreg and amdgpu_dc_wreg are very similar, for this reason,
this commits abstract these two events by using DECLARE_EVENT_CLASS and
create an instance of it for each one of these events.

Signed-off-by: Rodrigo Siqueira 


This looks reasonable to me. Does this still show up as 
amdgpu_dc_rreg/amdgpu_dc_wreg in the captured trace log?
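For reference, DEFINE_EVENT() instances are still registered and printed under their own event names, so I'd expect the existing call sites to be unaffected, e.g. (argument names are illustrative):

	trace_amdgpu_dc_rreg(&read_count, address, value);
	trace_amdgpu_dc_wreg(&write_count, address, value);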


As long as we can still tell this apart you can consider this patch:

Reviewed-by: Nicholas Kazlauskas 

Regards,
Nicholas Kazlauskas


---
  .../amd/display/amdgpu_dm/amdgpu_dm_trace.h   | 55 ---
  1 file changed, 24 insertions(+), 31 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_trace.h 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_trace.h
index d898981684d5..dd34e11b1079 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_trace.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_trace.h
@@ -31,40 +31,33 @@
  
  #include 
  
-TRACE_EVENT(amdgpu_dc_rreg,

-   TP_PROTO(unsigned long *read_count, uint32_t reg, uint32_t value),
-   TP_ARGS(read_count, reg, value),
-   TP_STRUCT__entry(
-   __field(uint32_t, reg)
-   __field(uint32_t, value)
-   ),
-   TP_fast_assign(
-   __entry->reg = reg;
-   __entry->value = value;
-   *read_count = *read_count + 1;
-   ),
-   TP_printk("reg=0x%08lx, value=0x%08lx",
-   (unsigned long)__entry->reg,
-   (unsigned long)__entry->value)
-);
+DECLARE_EVENT_CLASS(amdgpu_dc_reg_template,
+   TP_PROTO(unsigned long *count, uint32_t reg, uint32_t 
value),
+   TP_ARGS(count, reg, value),
  
-TRACE_EVENT(amdgpu_dc_wreg,

-   TP_PROTO(unsigned long *write_count, uint32_t reg, uint32_t value),
-   TP_ARGS(write_count, reg, value),
-   TP_STRUCT__entry(
-   __field(uint32_t, reg)
-   __field(uint32_t, value)
-   ),
-   TP_fast_assign(
-   __entry->reg = reg;
-   __entry->value = value;
-   *write_count = *write_count + 1;
-   ),
-   TP_printk("reg=0x%08lx, value=0x%08lx",
-   (unsigned long)__entry->reg,
-   (unsigned long)__entry->value)
+   TP_STRUCT__entry(
+__field(uint32_t, reg)
+__field(uint32_t, value)
+   ),
+
+   TP_fast_assign(
+  __entry->reg = reg;
+  __entry->value = value;
+  *count = *count + 1;
+   ),
+
+   TP_printk("reg=0x%08lx, value=0x%08lx",
+ (unsigned long)__entry->reg,
+ (unsigned long)__entry->value)
  );
  
+DEFINE_EVENT(amdgpu_dc_reg_template, amdgpu_dc_rreg,

+TP_PROTO(unsigned long *count, uint32_t reg, uint32_t value),
+TP_ARGS(count, reg, value));
+
+DEFINE_EVENT(amdgpu_dc_reg_template, amdgpu_dc_wreg,
+TP_PROTO(unsigned long *count, uint32_t reg, uint32_t value),
+TP_ARGS(count, reg, value));
  
  TRACE_EVENT(amdgpu_dc_performance,

TP_PROTO(unsigned long read_count, unsigned long write_count,





Re: [PATCH v2] drm/amdgpu/dc: Require primary plane to be enabled whenever the CRTC is

2020-09-14 Thread Kazlauskas, Nicholas

On 2020-09-14 3:52 a.m., Michel Dänzer wrote:

On 2020-09-07 9:57 a.m., Daniel Vetter wrote:

On Fri, Sep 04, 2020 at 12:43:04PM +0200, Michel Dänzer wrote:

From: Michel Dänzer 

Don't check drm_crtc_state::active for this either, per its
documentation in include/drm/drm_crtc.h:

  * Hence drivers must not consult @active in their various
  * &drm_mode_config_funcs.atomic_check callback to reject an atomic
  * commit.

atomic_remove_fb disables the CRTC as needed for disabling the primary
plane.

This prevents at least the following problems if the primary plane gets
disabled (e.g. due to destroying the FB assigned to the primary plane,
as happens e.g. with mutter in Wayland mode):

* The legacy cursor ioctl returned EINVAL for a non-0 cursor FB ID
   (which enables the cursor plane).
* If the cursor plane was enabled, changing the legacy DPMS property
   value from off to on returned EINVAL.

v2:
* Minor changes to code comment and commit log, per review feedback.

GitLab: 
https://gitlab.gnome.org/GNOME/mutter/-/issues/1108 

GitLab: 
https://gitlab.gnome.org/GNOME/mutter/-/issues/1165 

GitLab: 
https://gitlab.gnome.org/GNOME/mutter/-/issues/1344 


Suggested-by: Daniel Vetter 
Signed-off-by: Michel Dänzer 


Commit message agrees with my understand of this maze now, so:

Acked-by: Daniel Vetter 


Thanks Daniel!


Nick / Harry, any more feedback? If not, can you apply this?


P.S. Since DCN doesn't make a distinction between primary or overlay 
planes in hardware, could ChromiumOS achieve the same effect with only 
the primary plane instead of only an overlay plane? If so, maybe there's 
no need for the "black plane hack".





I only know that atomictest tries to enable this configuration. Not sure 
if ChromiumOS or other "real" userspace tries this configuration.


Maybe for now this can go in and if something breaks we can deal with 
the fallout then. We can always go back to lying to userspace about the 
cursor being visible if the commit fails in that case, I guess, since the 
blank plane hack is going to add a significant amount of complexity to DM.


Reviewed-by: Nicholas Kazlauskas 

Regards,
Nicholas Kazlauskas


Re: [PATCH v2] drm/amdgpu/dc: Require primary plane to be enabled whenever the CRTC is

2020-09-14 Thread Kazlauskas, Nicholas

On 2020-09-14 11:22 a.m., Michel Dänzer wrote:

On 2020-09-14 4:37 p.m., Kazlauskas, Nicholas wrote:

On 2020-09-14 3:52 a.m., Michel Dänzer wrote:

On 2020-09-07 9:57 a.m., Daniel Vetter wrote:

On Fri, Sep 04, 2020 at 12:43:04PM +0200, Michel Dänzer wrote:

From: Michel Dänzer 

Don't check drm_crtc_state::active for this either, per its
documentation in include/drm/drm_crtc.h:

  * Hence drivers must not consult @active in their various
  * &drm_mode_config_funcs.atomic_check callback to reject an atomic
  * commit.

atomic_remove_fb disables the CRTC as needed for disabling the primary
plane.

This prevents at least the following problems if the primary plane 
gets

disabled (e.g. due to destroying the FB assigned to the primary plane,
as happens e.g. with mutter in Wayland mode):

* The legacy cursor ioctl returned EINVAL for a non-0 cursor FB ID
   (which enables the cursor plane).
* If the cursor plane was enabled, changing the legacy DPMS property
   value from off to on returned EINVAL.

v2:
* Minor changes to code comment and commit log, per review feedback.

GitLab: 
https://gitlab.gnome.org/GNOME/mutter/-/issues/1108 

GitLab: 
https://gitlab.gnome.org/GNOME/mutter/-/issues/1165 

GitLab: 
https://gitlab.gnome.org/GNOME/mutter/-/issues/1344 


Suggested-by: Daniel Vetter 
Signed-off-by: Michel Dänzer 


Commit message agrees with my understand of this maze now, so:

Acked-by: Daniel Vetter 


Thanks Daniel!


Nick / Harry, any more feedback? If not, can you apply this?


P.S. Since DCN doesn't make a distinction between primary or overlay 
planes in hardware, could ChromiumOS achieve the same effect with 
only the primary plane instead of only an overlay plane? If so, maybe 
there's no need for the "black plane hack".





I only know that atomictest tries to enable this configuration. Not 
sure if ChromiumOS or other "real" userspace tries this configuration.


Someone mentioned that ChromiumOS uses this for video playback with 
black bars (when the video aspect ratio doesn't match the display's).


We only expose support for NV12 on the primary plane so we wouldn't be 
hitting this case at least.





Maybe for now this can go in and if something breaks we can deal with 
the fallout then. We can always go back to lying to userspace about 
the cursor being visible if the commit fails in that case I guess [...]


I'm not sure what you mean by that / how it could work.


Dropping the check you added in this patch:

+   if (state->enable &&
+   !(state->plane_mask & drm_plane_mask(crtc->primary)))
return -EINVAL;

That way we can still allow the cursor plane to be enabled while 
primary/overlay are not; it just won't actually be visible on the 
screen. It'll fail some IGT tests, but nothing really uses this 
configuration as more than a temporary state.


Regards,
Nicholas Kazlauskas





Reviewed-by: Nicholas Kazlauskas 


Thanks!






Re: [PATCH v2 0/4] Enlarge tracepoints in the display component

2020-09-16 Thread Kazlauskas, Nicholas

On 2020-09-16 5:12 a.m., Daniel Vetter wrote:

On Fri, Sep 11, 2020 at 10:59:23AM -0400, Rodrigo Siqueira wrote:

Debug issues related to display can be a challenge due to the complexity
around this topic and different source of information might help in this
process. We already have support for tracepoints inside the display
component, i.e., we have the basic functionalities available and we just
need to expand it in order to make it more valuable for debugging. For
this reason, this patchset reworks part of the current tracepoint
options and add different sets of tracing inside amdgpu_dm, display
core, and DCN10. The first patch of this series just rework part of the
current tracepoints and the last set of patches introduces new
tracepoints.

This first patchset version is functional. Please, let me know what I
can improve in the current version but also let me know what kind of
tracepoint I can add for the next version.

Finally, I want to highlight that this work is based on a set of patches
originally made by Nicholas Kazlauskas.

Change in V2:
- I added another patch for capturing the clock state for different display
   architecture.


Hm I'm not super sure tracepoints for state dumping are the right thing
here. We kinda have the atomic state dumping code with all the various
callbacks, and you can extend that pretty easily. Gives you full state
dump in debugfs, plus a few function to dump into dmesg.

Maybe what we need is a function to dump this also into printk tracepoint
(otoh with Sean Paul's tracepoint work we'd get that through the dmesg
stuff already), and then you could do it there?

Upside is that for customers they'd get a much more consistent way to
debug display issues across different drivers.

For low-level hw debug what we do is give the hw guys an mmio trace, and
they replay it on the fancy boxes :-) So for that I think this here is
again too high level, but maybe what you have is a bit different.
-Daniel


We have raw register traces, but what I find most useful is being able 
to see the incoming DRM IOCTLs, objects, and properties per commit.


Many of the bugs we see in display code are in the conversion from DRM -> 
DM -> DC state. The current HW state is kind of useless in most cases, 
but the sequence helps track down intermittent problems and understand 
state transitions.


Tracepoints provide everything I really need to be able to track down 
these problems without falling back to a full debugger. The existing DRM 
prints (even at high logging levels) aren't enough to understand what's 
going on in most cases in our driver so funneling those into tracepoints 
to improve perf doesn't really help that much.


I think this kind of idea was rejected for DRM core last year with 
Sean's patch series but if we can't get them into core then I'd like to 
get them into our driver at least. These are a cleaned up version of 
Sean's work + my work that I end up applying locally whenever I debug 
something.


Regards,
Nicholas Kazlauskas





Rodrigo Siqueira (4):
   drm/amd/display: Rework registers tracepoint
   drm/amd/display: Add tracepoint for amdgpu_dm
   drm/amd/display: Add pipe_state tracepoint
   drm/amd/display: Add tracepoint for capturing clocks state

  .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c |  17 +
  .../amd/display/amdgpu_dm/amdgpu_dm_trace.h   | 712 +-
  .../dc/clk_mgr/dce112/dce112_clk_mgr.c|   5 +
  .../display/dc/clk_mgr/dcn10/rv1_clk_mgr.c|   4 +
  .../display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c  |   4 +
  .../amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c |   4 +
  .../display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c  |   4 +
  drivers/gpu/drm/amd/display/dc/core/dc.c  |  11 +
  .../gpu/drm/amd/display/dc/dce/dce_clk_mgr.c  |   5 +
  .../amd/display/dc/dcn10/dcn10_hw_sequencer.c |  17 +-
  10 files changed, 747 insertions(+), 36 deletions(-)

--
2.28.0







Re: [PATCH] drm/amd/display: Add missing "Copy GSL groups when committing a new context"

2020-09-16 Thread Kazlauskas, Nicholas

On 2020-09-16 1:08 p.m., Bhawanpreet Lakha wrote:

[Why]
"Copy GSL groups when committing a new context" patch was accidentally
removed during a refactor

Patch: 21ffcc94d5b ("drm/amd/display: Copy GSL groups when committing a new 
context")

[How]
Re add it

Fixes: b6e881c9474 ("drm/amd/display: update navi to use new surface programming 
behaviour")
Signed-off-by: Bhawanpreet Lakha 


Reviewed-by: Nicholas Kazlauskas 

Regards,
Nicholas Kazlauskas


---
  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c | 11 +++
  1 file changed, 11 insertions(+)

diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c 
b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
index 5720b6e5d321..01530e686f43 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
@@ -1642,6 +1642,17 @@ void dcn20_program_front_end_for_ctx(
struct dce_hwseq *hws = dc->hwseq;
DC_LOGGER_INIT(dc->ctx->logger);
  
+	/* Carry over GSL groups in case the context is changing. */

+   for (i = 0; i < dc->res_pool->pipe_count; i++) {
+   struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+   struct pipe_ctx *old_pipe_ctx =
+   &dc->current_state->res_ctx.pipe_ctx[i];
+
+   if (pipe_ctx->stream == old_pipe_ctx->stream)
+   pipe_ctx->stream_res.gsl_group =
+   old_pipe_ctx->stream_res.gsl_group;
+   }
+
if (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) {
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe_ctx = 
&context->res_ctx.pipe_ctx[i];





Re: [PATCH] drm/amdgpu/dc: Pixel encoding DRM property and module parameter

2020-09-28 Thread Kazlauskas, Nicholas

On 2020-09-28 3:31 p.m., Christian König wrote:

Am 28.09.20 um 19:35 schrieb James Ettle:

On Mon, 2020-09-28 at 10:26 -0400, Harry Wentland wrote:

On 2020-09-25 5:18 p.m., Alex Deucher wrote:

On Tue, Sep 22, 2020 at 4:51 PM James Ettle 
wrote:

On 22/09/2020 21:33, Alex Deucher wrote:

+/**
+ * DOC: pixel_encoding (string)
+ * Specify the initial pixel encoding used by a connector.
+ */
+static char amdgpu_pixel_encoding[MAX_INPUT];
+MODULE_PARM_DESC(pixel_encoding, "Override pixel encoding");
+module_param_string(pixel_encoding, amdgpu_pixel_encoding,
sizeof(amdgpu_pixel_encoding), 0444);

You can drop this part.  We don't need a module parameter if we
have a
kms property.

Alex

OK, but is there then an alternative means of setting the pixel
encoding to be used immediately on boot or when amdgpu loads?
Also are there user tools other than xrandr to change a KMS
property, for Wayland and console users?

You can force some things on the kernel command line, but I don't
recall whether that includes kms properties or not.  As for ways to
change properties, the KMS API provides a way.  those are exposed
via
randr when using X.  When using wayland compositors, it depends on
the
compositor.


I'm not aware of a way to specify KMS properties through the kernel
command line. I don't think it's possible.

For atomic userspace the userspace wants to be the authority on the
KMS
config. I'm not sure there's a way to set these properties with
Wayland
unless a Wayland compositor plumbs them through.

Can you summarize on a higher level what problem you're trying to
solve?
I wonder if it'd be better solved with properties on a DRM level that
all drivers can follow if desired.

Harry


Alex


The problem this is trying to solve is that the pixel format defaults
to YCbCr444 on HDMI if the monitor claims to support it, in preference
to RGB. This behaviour is hard-coded (see the
comment fill_stream_properties_from_drm_display_mode) and there is no
way for the user to change the pixel format to RGB, other than hacking
the EDID to disable the YCbCr flags.

Using YCbCr (rather than RGB) has been reported to cause suboptimal
results for some users: colour quality issues or perceptible conversion
latency at the monitor end -- see:

https://gitlab.freedesktop.org/drm/amd/-/issues/476 



for the full details.

This patch tries to solve this issue by adding a DRM property so Xorg
users can change the pixel encoding on-the-fly, and a module parameter
to set the default encoding at amdgpu's init for all users.

[I suppose an alternative here is to look into the rationale of
defaulting to YCbCr444 on HDMI when the monitor also supports RGB. For
reference although on my kit I see no detrimental effects from YCbCr,
I'm using onboard graphics with a motherboard that has just D-sub and
HDMI -- so DisplayPort's not an option.]


Ah, that problem again. Yes, that's an issue since the early fglrx days 
on linux.


Shouldn't the pixel encoding be part of the mode to run ?

Regards,
Christian.


DRM modes don't specify the encoding. The property as part of this patch 
lets userspace override it but the userspace GUI support isn't there on 
Wayland IIRC.


I'm fine with adding the properties, but I don't think the module 
parameter is the right solution here. I think it's better if we try to 
get this into atomic userspace as well, or revive efforts that have 
already been started.


The problem with the module parameter is that it'd apply a single 
default to every DRM connector. There's no way to specify different 
defaults per connector, nor do we know the full connector set at driver 
initialization - the list is dynamic and can change when you plug/unplug 
MST displays.
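A rough sketch of what a per-connector default could look like instead (property name and enum values are illustrative only, attached when each connector is created):

	static const struct drm_prop_enum_list pixel_enc_names[] = {
		{ 0, "auto" },
		{ 1, "rgb" },
		{ 2, "ycbcr444" },
		{ 3, "ycbcr420" },
	};

	prop = drm_property_create_enum(dev, 0, "pixel encoding",
					pixel_enc_names,
					ARRAY_SIZE(pixel_enc_names));
	if (prop)
		drm_object_attach_property(&connector->base, prop, 0);

That way the default is owned per connector and naturally covers MST connectors coming and going.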


Regards,
Nicholas Kazlauskas





-James









Re: [PATCH] drm/amd/display: Fix external display detection with overlay

2020-10-01 Thread Kazlauskas, Nicholas

On 2020-10-01 5:06 a.m., Pratik Vishwakarma wrote:

[Why]
When overlay plane is in use and external display
is connected, atomic check will fail.

[How]
Disable overlay plane on multi-monitor scenario
by tying it to single crtc.

Signed-off-by: Pratik Vishwakarma 


This will break overlay usage on any CRTC other than index 1. That 
index is arbitrary and can vary based on board configuration. As-is, this 
patch breaks a number of our existing IGT tests that were previously 
passing.
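For context, possible_crtcs is a bitmask of CRTC indices, which is why the existing code leaves overlays usable on any CRTC (sketch of the current logic):

	possible_crtcs = 1 << plane_id;
	if (plane_id >= dm->dc->caps.max_streams)
		possible_crtcs = 0xff;	/* overlay: no fixed CRTC binding */

Hard-coding 0x01 binds the overlay to whichever CRTC happens to be registered first on a given board.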


If possible, userspace should really be made aware that overlays can't 
be left enabled across major hardware configuration changes - e.g. 
enabling extra planes and CRTCs.


Regards,
Nicholas Kazlauskas


---
  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 2 +-
  1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index e8177656e083..e45c1176048a 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -3149,7 +3149,7 @@ static int initialize_plane(struct amdgpu_display_manager 
*dm,
 */
possible_crtcs = 1 << plane_id;
if (plane_id >= dm->dc->caps.max_streams)
-   possible_crtcs = 0xff;
+   possible_crtcs = 0x01;
  
  	ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
  





Re: [PATCH] drm/amd/display: Add missing function pointers for dcn3

2020-10-05 Thread Kazlauskas, Nicholas

On 2020-10-05 2:10 p.m., Bhawanpreet Lakha wrote:

These function pointers are missing from dcn30_init

.calc_vupdate_position
.set_pipe

So add them

Signed-off-by: Bhawanpreet Lakha 


Reviewed-by: Nicholas Kazlauskas 

It would be good to mention what these are used for specifically, though.

The calc_vupdate_position in particular is used to help avoid cursor 
stuttering.


Regards,
Nicholas Kazlauskas


---
  drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c | 2 ++
  1 file changed, 2 insertions(+)

diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c 
b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c
index 7c90c506..dc312d4172af 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c
@@ -90,9 +90,11 @@ static const struct hw_sequencer_funcs dcn30_funcs = {
.init_vm_ctx = dcn20_init_vm_ctx,
.set_flip_control_gsl = dcn20_set_flip_control_gsl,
.get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync,
+   .calc_vupdate_position = dcn10_calc_vupdate_position,
.apply_idle_power_optimizations = dcn30_apply_idle_power_optimizations,
.set_backlight_level = dcn21_set_backlight_level,
.set_abm_immediate_disable = dcn21_set_abm_immediate_disable,
+   .set_pipe = dcn21_set_pipe,
  };
  
  static const struct hwseq_private_funcs dcn30_private_funcs = {






Re: [PATCH 1/2] drm/amd/display: setup system context in dm_init

2020-10-14 Thread Kazlauskas, Nicholas

On 2020-10-14 3:04 a.m., Yifan Zhang wrote:

Change-Id: I831a5ade8b87c23d21a63d08cc4d338468769e2b
Signed-off-by: Yifan Zhang 
---
  .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 61 +++
  1 file changed, 61 insertions(+)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 3cf4e08931bb..aaff8800c7a0 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -887,12 +887,67 @@ static void 
amdgpu_check_debugfs_connector_property_change(struct amdgpu_device
}
  }
  
+static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)

+{
+   uint64_t pt_base;
+   uint32_t logical_addr_low;
+   uint32_t logical_addr_high;
+   uint32_t agp_base, agp_bot, agp_top;
+   PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;
+
+   logical_addr_low  = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
+   pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
+
+   if (adev->apu_flags & AMD_APU_IS_RAVEN2)
+   /*
+* Raven2 has a HW issue that it is unable to use the vram which
+* is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
+* workaround that increase system aperture high address (add 1)
+* to get rid of the VM fault and hardware hang.
+*/
+   logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, 
adev->gmc.agp_end >> 18);
+   else
+   logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 
18;
+
+   agp_base = 0;
+   agp_bot = adev->gmc.agp_start >> 24;
+   agp_top = adev->gmc.agp_end >> 24;
+
+
+   page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
+   page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
+   page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
+   page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
+   page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
+   page_table_base.low_part = lower_32_bits(pt_base);
+
+   pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 
18;
+   pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;
+
+   pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24 ;
+   pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
+   pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;
+
+   pa_config->system_aperture.fb_base = adev->gmc.fb_start;
+   pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
+   pa_config->system_aperture.fb_top = adev->gmc.fb_end;
+
+   pa_config->gart_config.page_table_start_addr = page_table_start.quad_part 
<< 12;
+   pa_config->gart_config.page_table_end_addr = page_table_end.quad_part 
<< 12;
+   pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;
+
+   pa_config->is_hvm_enabled = 0;
+
+}
+
+
  static int amdgpu_dm_init(struct amdgpu_device *adev)
  {
struct dc_init_data init_data;
  #ifdef CONFIG_DRM_AMD_DC_HDCP
struct dc_callback_init init_params;
  #endif
+   struct dc_phy_addr_space_config pa_config;
int r;
  
  	adev->dm.ddev = adev_to_drm(adev);

@@ -1040,6 +1095,12 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
goto error;
}
  
+	mmhub_read_system_context(adev, &pa_config);

+
+   // Call the DC init_memory func
+   dc_setup_system_context(adev->dm.dc, &pa_config);
+
+


The dc_setup_system_context should come directly after dc_hardware_init().
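I.e. something like this ordering in amdgpu_dm_init() (sketch only, assuming the existing dc_hardware_init() call site):

	/* ... dc_create() and the rest of DC bring-up ... */
	dc_hardware_init(adev->dm.dc);

	mmhub_read_system_context(adev, &pa_config);
	dc_setup_system_context(adev->dm.dc, &pa_config);

	/* ... remaining DM initialization ... */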

With that fixed this series is

Reviewed-by: Nicholas Kazlauskas 

There's the vmid module as well that could be created afterwards if needed, 
but for s/g support alone that's not necessary.


Regards,
Nicholas Kazlauskas


DRM_DEBUG_DRIVER("KMS initialized.\n");
  
  	return 0;






Re: [PATCH 1/2] drm/amd/display: setup system context in dm_init

2020-10-14 Thread Kazlauskas, Nicholas

On 2020-10-14 9:20 a.m., Kazlauskas, Nicholas wrote:

On 2020-10-14 3:04 a.m., Yifan Zhang wrote:

Change-Id: I831a5ade8b87c23d21a63d08cc4d338468769e2b
Signed-off-by: Yifan Zhang 
---
  .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 61 +++
  1 file changed, 61 insertions(+)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c

index 3cf4e08931bb..aaff8800c7a0 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -887,12 +887,67 @@ static void 
amdgpu_check_debugfs_connector_property_change(struct amdgpu_device

  }
  }
+static void mmhub_read_system_context(struct amdgpu_device *adev, 
struct dc_phy_addr_space_config *pa_config)

+{
+    uint64_t pt_base;
+    uint32_t logical_addr_low;
+    uint32_t logical_addr_high;
+    uint32_t agp_base, agp_bot, agp_top;
+    PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, 
page_table_base;

+
+    logical_addr_low  = min(adev->gmc.fb_start, adev->gmc.agp_start) 
>> 18;

+    pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
+
+    if (adev->apu_flags & AMD_APU_IS_RAVEN2)
+    /*
+ * Raven2 has a HW issue that it is unable to use the vram which
+ * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
+ * workaround that increase system aperture high address (add 1)
+ * to get rid of the VM fault and hardware hang.
+ */
+    logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, 
adev->gmc.agp_end >> 18);

+    else
+    logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) 
>> 18;

+
+    agp_base = 0;
+    agp_bot = adev->gmc.agp_start >> 24;
+    agp_top = adev->gmc.agp_end >> 24;
+
+
+    page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 
0xF;

+    page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
+    page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
+    page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
+    page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
+    page_table_base.low_part = lower_32_bits(pt_base);
+
+    pa_config->system_aperture.start_addr = 
(uint64_t)logical_addr_low << 18;
+    pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high 
<< 18;

+
+    pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24 ;
+    pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
+    pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;
+
+    pa_config->system_aperture.fb_base = adev->gmc.fb_start;
+    pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
+    pa_config->system_aperture.fb_top = adev->gmc.fb_end;
+
+    pa_config->gart_config.page_table_start_addr = 
page_table_start.quad_part << 12;
+    pa_config->gart_config.page_table_end_addr = 
page_table_end.quad_part << 12;
+    pa_config->gart_config.page_table_base_addr = 
page_table_base.quad_part;

+
+    pa_config->is_hvm_enabled = 0;
+
+}
+
+
  static int amdgpu_dm_init(struct amdgpu_device *adev)
  {
  struct dc_init_data init_data;
  #ifdef CONFIG_DRM_AMD_DC_HDCP
  struct dc_callback_init init_params;
  #endif
+    struct dc_phy_addr_space_config pa_config;
  int r;
  adev->dm.ddev = adev_to_drm(adev);
@@ -1040,6 +1095,12 @@ static int amdgpu_dm_init(struct amdgpu_device 
*adev)

  goto error;
  }
+    mmhub_read_system_context(adev, &pa_config);
+
+    // Call the DC init_memory func
+    dc_setup_system_context(adev->dm.dc, &pa_config);
+
+


The dc_setup_system_context should come directly after dc_hardware_init().

With that fixed this series is

Reviewed-by: Nicholas Kazlauskas 

There's the vmid module as well that could be created afterwards if needed, 
but for s/g support alone that's not necessary.


Regards,
Nicholas Kazlauskas


Actually, the commit messages could use some work too - it would be good 
to have at least a brief why/how description.
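Something brief along these lines would do (illustrative only):

[Why]
DC needs to know the system aperture and GART page table layout in
order to translate addresses for system memory (s/g) display support.

[How]
Read the MMHUB configuration into a dc_phy_addr_space_config and pass
it to DC via dc_setup_system_context() after hardware init.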


Don't forget to drop the Change-Id as well.

Regards,
Nicholas Kazlauskas




  DRM_DEBUG_DRIVER("KMS initialized.\n");
  return 0;








Re: [PATCH 2/2] drm/amd/display: Avoid MST manager resource leak.

2020-10-16 Thread Kazlauskas, Nicholas

On 2020-10-15 11:02 p.m., Alex Deucher wrote:

On Wed, Oct 14, 2020 at 1:25 PM Andrey Grodzovsky
 wrote:


On connector destruction call drm_dp_mst_topology_mgr_destroy
to release resources allocated in drm_dp_mst_topology_mgr_init.
Do it only if MST manager was initialized before otherwsie a crash
is seen on driver unload/device unplug.



Not really an mst expert, but this seems to match what i915 and
nouveau do.  Series is:
Acked-by: Alex Deucher 


Signed-off-by: Andrey Grodzovsky 


Looks reasonable to me. Untested, however.

Reviewed-by: Nicholas Kazlauskas 

Regards,
Nicholas Kazlauskas


---
  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 7 +++
  1 file changed, 7 insertions(+)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index a72447d..64799c4 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -5170,6 +5170,13 @@ static void amdgpu_dm_connector_destroy(struct 
drm_connector *connector)
 struct amdgpu_device *adev = drm_to_adev(connector->dev);
 struct amdgpu_display_manager *dm = &adev->dm;

+   /*
+* Call only if mst_mgr was initialized before since it's not done
+* for all connector types.
+*/
+   if (aconnector->mst_mgr.dev)
+   drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
+
  #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)

--
2.7.4





Re: [PATCH] drm/amd/display: Fix deadlock during gpu reset

2021-01-11 Thread Kazlauskas, Nicholas

On 2021-01-11 2:55 p.m., Bhawanpreet Lakha wrote:

[Why]
during idle optimizations we acquire the dc_lock, this lock is also
acquired during gpu_reset so we end up hanging the system due to a
deadlock

[How]
If we are in gpu reset dont acquire the dc lock, as we already have it


Are we sure this is the behavior we want?

I think if we are in GPU reset then we shouldn't be attempting to modify 
idle optimization state at all, i.e. return early if amdgpu_in_reset().
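I.e. something like this at the top of dm_set_vblank() (sketch):

	if (amdgpu_in_reset(adev))
		return 0;

	mutex_lock(&dm->dc_lock);
	/* ... active_vblank_irq_count bookkeeping and idle opt decision ... */
	mutex_unlock(&dm->dc_lock);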


The calls around the locks are working around bad policy.

Regards,
Nicholas Kazlauskas



Fixes: 06d5652541c3 ("drm/amd/display: enable idle optimizations for linux (MALL 
stutter)")
Signed-off-by: Bhawanpreet Lakha 
---
  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 6 --
  1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 99c7f9eb44aa..2170e1b2d32c 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -5556,7 +5556,8 @@ static inline int dm_set_vblank(struct drm_crtc *crtc, 
bool enable)
if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
return -EBUSY;
  
-	mutex_lock(&dm->dc_lock);

+   if (!amdgpu_in_reset(adev))
+   mutex_lock(&dm->dc_lock);
  
  	if (enable)

dm->active_vblank_irq_count++;
@@ -5568,7 +5569,8 @@ static inline int dm_set_vblank(struct drm_crtc *crtc, 
bool enable)
  
  	DRM_DEBUG_DRIVER("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);
  
-	mutex_unlock(&dm->dc_lock);

+   if (!amdgpu_in_reset(adev))
+   mutex_unlock(&dm->dc_lock);
  
  	return 0;

  }





Re: [PATCH] drm/amd/display: Fix deadlock during gpu reset v3

2021-01-12 Thread Kazlauskas, Nicholas

On 2021-01-12 11:13 a.m., Bhawanpreet Lakha wrote:

[Why]
during idle optimizations we acquire the dc_lock, this lock is also
acquired during gpu_reset so we end up hanging the system due to a
deadlock

[How]
If we are in gpu reset:
  - disable idle optimizations and skip calls to the dc function

v2: skip idle optimizations calls
v3: add guard for DCN

Fixes: 06d5652541c3 ("drm/amd/display: enable idle optimizations for linux (MALL 
stutter)")
Signed-off-by: Bhawanpreet Lakha 


Reviewed-by: Nicholas Kazlauskas 

Regards,
Nicholas Kazlauskas


---
  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 10 ++
  1 file changed, 10 insertions(+)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index de71b6c21590..82ceb0a8ba29 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -1816,6 +1816,11 @@ static int dm_suspend(void *handle)
  
  	if (amdgpu_in_reset(adev)) {

mutex_lock(&dm->dc_lock);
+
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+   dc_allow_idle_optimizations(adev->dm.dc, false);
+#endif
+
dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
  
  		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);

@@ -5556,6 +5561,10 @@ static inline int dm_set_vblank(struct drm_crtc *crtc, 
bool enable)
if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
return -EBUSY;
  
+#if defined(CONFIG_DRM_AMD_DC_DCN)

+   if (amdgpu_in_reset(adev))
+   return 0;
+
mutex_lock(&dm->dc_lock);
  
  	if (enable)

@@ -5572,6 +5581,7 @@ static inline int dm_set_vblank(struct drm_crtc *crtc, 
bool enable)
  
  	mutex_unlock(&dm->dc_lock);
  
+#endif

return 0;
  }
  





Re: [PATCH 3/3] drm/amd/display: Update dcn30_apply_idle_power_optimizations() code

2021-01-19 Thread Kazlauskas, Nicholas

On 2021-01-19 3:38 p.m., Bhawanpreet Lakha wrote:

Update the function for idle optimizations
-remove hardcoded size
-enable no memory-request case
-add cursor copy
-update mall eligibility check case

Signed-off-by: Bhawanpreet Lakha 
Signed-off-by: Joshua Aberback 


Series is:

Reviewed-by: Nicholas Kazlauskas 

Though you might want to update patch 1's commit message to explain 
watermark set D in a little more detail.


Regards,
Nicholas Kazlauskas


---
  drivers/gpu/drm/amd/display/dc/dc.h   |   2 +
  .../drm/amd/display/dc/dcn30/dcn30_hwseq.c| 157 +-
  .../amd/display/dc/dcn302/dcn302_resource.c   |   4 +-
  .../gpu/drm/amd/display/dmub/inc/dmub_cmd.h   |   5 +
  4 files changed, 129 insertions(+), 39 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dc.h 
b/drivers/gpu/drm/amd/display/dc/dc.h
index e21d4602e427..71d46ade24e5 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -502,6 +502,8 @@ struct dc_debug_options {
  #if defined(CONFIG_DRM_AMD_DC_DCN)
bool disable_idle_power_optimizations;
unsigned int mall_size_override;
+   unsigned int mall_additional_timer_percent;
+   bool mall_error_as_fatal;
  #endif
bool dmub_command_table; /* for testing only */
struct dc_bw_validation_profile bw_val_profile;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c 
b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
index 5c546b06f551..dff83c6a142a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
@@ -710,8 +710,11 @@ void dcn30_program_dmdata_engine(struct pipe_ctx *pipe_ctx)
  bool dcn30_apply_idle_power_optimizations(struct dc *dc, bool enable)
  {
union dmub_rb_cmd cmd;
-   unsigned int surface_size, refresh_hz, denom;
uint32_t tmr_delay = 0, tmr_scale = 0;
+   struct dc_cursor_attributes cursor_attr;
+   bool cursor_cache_enable = false;
+   struct dc_stream_state *stream = NULL;
+   struct dc_plane_state *plane = NULL;
  
  	if (!dc->ctx->dmub_srv)

return false;
@@ -722,72 +725,150 @@ bool dcn30_apply_idle_power_optimizations(struct dc *dc, 
bool enable)
  
  			/* First, check no-memory-requests case */

for (i = 0; i < dc->current_state->stream_count; i++) {
-   if (dc->current_state->stream_status[i]
-   .plane_count)
+   if 
(dc->current_state->stream_status[i].plane_count)
/* Fail eligibility on a visible stream 
*/
break;
}
  
-			if (dc->current_state->stream_count == 1 // single display only

-   && dc->current_state->stream_status[0].plane_count 
== 1 // single surface only
-   && 
dc->current_state->stream_status[0].plane_states[0]->address.page_table_base.quad_part 
== 0 // no VM
-   // Only 8 and 16 bit formats
-   && 
dc->current_state->stream_status[0].plane_states[0]->format <= 
SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F
-   && 
dc->current_state->stream_status[0].plane_states[0]->format >= 
SURFACE_PIXEL_FORMAT_GRPH_ARGB) {
-   surface_size = 
dc->current_state->stream_status[0].plane_states[0]->plane_size.surface_pitch *
-   
dc->current_state->stream_status[0].plane_states[0]->plane_size.surface_size.height
 *
-   
(dc->current_state->stream_status[0].plane_states[0]->format >= 
SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616 ?
-8 : 4);
-   } else {
-   // TODO: remove hard code size
-   surface_size = 128 * 1024 * 1024;
+   if (i == dc->current_state->stream_count) {
+   /* Enable no-memory-requests case */
+   memset(&cmd, 0, sizeof(cmd));
+   cmd.mall.header.type = DMUB_CMD__MALL;
+   cmd.mall.header.sub_type = 
DMUB_CMD__MALL_ACTION_NO_DF_REQ;
+   cmd.mall.header.payload_bytes = 
sizeof(cmd.mall) - sizeof(cmd.mall.header);
+
+   dc_dmub_srv_cmd_queue(dc->ctx->dmub_srv, &cmd);
+   dc_dmub_srv_cmd_execute(dc->ctx->dmub_srv);
+
+   return true;
}
  
-			// TODO: remove hard code size

-   if (surface_size < 128 * 1024 * 1024) {
-   refresh_hz = div_u64((unsigned long long) 
dc->current_state->streams[0]->timing.pix_clk_100hz *
- 

Re: [PATCH] drm/amd/display: Implement functions to let DC allocate GPU memory

2021-01-19 Thread Kazlauskas, Nicholas

On 2021-01-19 3:40 p.m., Bhawanpreet Lakha wrote:

From: Harry Wentland 

[Why]
DC needs to communicate with PM FW through GPU memory. In order
to do so we need to be able to allocate memory from within DC.

[How]
Call amdgpu_bo_create_kernel to allocate GPU memory and use a
list in amdgpu_display_manager to track our allocations so we
can clean them up later.

Signed-off-by: Harry Wentland 


Reviewed-by: Nicholas Kazlauskas 

Regards,
Nicholas Kazlauskas


---
  .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c |  2 +
  .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h |  9 +
  .../amd/display/amdgpu_dm/amdgpu_dm_helpers.c | 40 +--
  3 files changed, 48 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index e490fc2486f7..83ec92a69cba 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -1017,6 +1017,8 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
  
  	init_data.soc_bounding_box = adev->dm.soc_bounding_box;
  
+	INIT_LIST_HEAD(&adev->dm.da_list);

+
/* Display Core create. */
adev->dm.dc = dc_create(&init_data);
  
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h

index 38bc0f88b29c..49137924a855 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -130,6 +130,13 @@ struct amdgpu_dm_backlight_caps {
bool aux_support;
  };
  
+struct dal_allocation {

+   struct list_head list;
+   struct amdgpu_bo *bo;
+   void *cpu_ptr;
+   u64 gpu_addr;
+};
+
  /**
   * struct amdgpu_display_manager - Central amdgpu display manager device
   *
@@ -350,6 +357,8 @@ struct amdgpu_display_manager {
 */
struct amdgpu_encoder mst_encoders[AMDGPU_DM_MAX_CRTC];
bool force_timing_sync;
+
+   struct list_head da_list;
  };
  
  enum dsc_clock_force_state {

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
index 3244a6ea7a65..5dc426e6e785 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
@@ -652,8 +652,31 @@ void *dm_helpers_allocate_gpu_mem(
size_t size,
long long *addr)
  {
-   // TODO
-   return NULL;
+   struct amdgpu_device *adev = ctx->driver_context;
+   struct dal_allocation *da;
+   u32 domain = (type == DC_MEM_ALLOC_TYPE_GART) ?
+   AMDGPU_GEM_DOMAIN_GTT : AMDGPU_GEM_DOMAIN_VRAM;
+   int ret;
+
+   da = kzalloc(sizeof(struct dal_allocation), GFP_KERNEL);
+   if (!da)
+   return NULL;
+
+   ret = amdgpu_bo_create_kernel(adev, size, PAGE_SIZE,
+ domain, &da->bo,
+ &da->gpu_addr, &da->cpu_ptr);
+
+   *addr = da->gpu_addr;
+
+   if (ret) {
+   kfree(da);
+   return NULL;
+   }
+
+   /* add da to list in dm */
+   list_add(&da->list, &adev->dm.da_list);
+
+   return da->cpu_ptr;
  }
  
  void dm_helpers_free_gpu_mem(

@@ -661,5 +684,16 @@ void dm_helpers_free_gpu_mem(
enum dc_gpu_mem_alloc_type type,
void *pvMem)
  {
-   // TODO
+   struct amdgpu_device *adev = ctx->driver_context;
+   struct dal_allocation *da;
+
+   /* walk the da list in DM */
+   list_for_each_entry(da, &adev->dm.da_list, list) {
+   if (pvMem == da->cpu_ptr) {
+   amdgpu_bo_free_kernel(&da->bo, &da->gpu_addr, 
&da->cpu_ptr);
+   list_del(&da->list);
+   kfree(da);
+   break;
+   }
+   }
  }





Re: [PATCH] drm/amd/display: Implement functions to let DC allocate GPU memory

2021-01-20 Thread Kazlauskas, Nicholas

On 2021-01-20 5:26 a.m., Christian König wrote:

Am 19.01.21 um 21:40 schrieb Bhawanpreet Lakha:

From: Harry Wentland 

[Why]
DC needs to communicate with PM FW through GPU memory. In order
to do so we need to be able to allocate memory from within DC.

[How]
Call amdgpu_bo_create_kernel to allocate GPU memory and use a
list in amdgpu_display_manager to track our allocations so we
can clean them up later.


Well that looks like classic mid-layering to me with a horrible 
implementation of the free function.


Christian.


FWIW this is only really used during device creation and destruction, so 
the overhead of the free function isn't a significant concern.


Does AMDGPU always need to know the GPU address for the allocation to 
free it, or should we work on fixing the call sites to pass it down?


We generally separate the CPU/GPU pointer but maybe it'd be best to have 
some sort of allocation object here that has both for DC's purposes.
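Rough sketch of what I mean (names are illustrative, not an existing interface):

	struct dal_allocation_handle {
		struct amdgpu_bo *bo;
		void *cpu_ptr;
		u64 gpu_addr;
	};

	/* dm_helpers_allocate_gpu_mem() would hand this back to DC, and the
	 * free path takes it directly instead of walking a list to match
	 * the CPU pointer back to the BO: */
	void dm_helpers_free_gpu_mem(struct dc_context *ctx,
				     struct dal_allocation_handle *h)
	{
		amdgpu_bo_free_kernel(&h->bo, &h->gpu_addr, &h->cpu_ptr);
		kfree(h);
	}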


Regards,
Nicholas Kazlauskas





Signed-off-by: Harry Wentland 
---
  .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c |  2 +
  .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h |  9 +
  .../amd/display/amdgpu_dm/amdgpu_dm_helpers.c | 40 +--
  3 files changed, 48 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c

index e490fc2486f7..83ec92a69cba 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -1017,6 +1017,8 @@ static int amdgpu_dm_init(struct amdgpu_device 
*adev)

  init_data.soc_bounding_box = adev->dm.soc_bounding_box;
+    INIT_LIST_HEAD(&adev->dm.da_list);
+
  /* Display Core create. */
  adev->dm.dc = dc_create(&init_data);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h

index 38bc0f88b29c..49137924a855 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -130,6 +130,13 @@ struct amdgpu_dm_backlight_caps {
  bool aux_support;
  };
+struct dal_allocation {
+    struct list_head list;
+    struct amdgpu_bo *bo;
+    void *cpu_ptr;
+    u64 gpu_addr;
+};
+
  /**
   * struct amdgpu_display_manager - Central amdgpu display manager 
device

   *
@@ -350,6 +357,8 @@ struct amdgpu_display_manager {
   */
  struct amdgpu_encoder mst_encoders[AMDGPU_DM_MAX_CRTC];
  bool force_timing_sync;
+
+    struct list_head da_list;
  };
  enum dsc_clock_force_state {
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c

index 3244a6ea7a65..5dc426e6e785 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
@@ -652,8 +652,31 @@ void *dm_helpers_allocate_gpu_mem(
  size_t size,
  long long *addr)
  {
-    // TODO
-    return NULL;
+    struct amdgpu_device *adev = ctx->driver_context;
+    struct dal_allocation *da;
+    u32 domain = (type == DC_MEM_ALLOC_TYPE_GART) ?
+    AMDGPU_GEM_DOMAIN_GTT : AMDGPU_GEM_DOMAIN_VRAM;
+    int ret;
+
+    da = kzalloc(sizeof(struct dal_allocation), GFP_KERNEL);
+    if (!da)
+    return NULL;
+
+    ret = amdgpu_bo_create_kernel(adev, size, PAGE_SIZE,
+  domain, &da->bo,
+  &da->gpu_addr, &da->cpu_ptr);
+
+    *addr = da->gpu_addr;
+
+    if (ret) {
+    kfree(da);
+    return NULL;
+    }
+
+    /* add da to list in dm */
+    list_add(&da->list, &adev->dm.da_list);
+
+    return da->cpu_ptr;
  }
  void dm_helpers_free_gpu_mem(
@@ -661,5 +684,16 @@ void dm_helpers_free_gpu_mem(
  enum dc_gpu_mem_alloc_type type,
  void *pvMem)
  {
-    // TODO
+    struct amdgpu_device *adev = ctx->driver_context;
+    struct dal_allocation *da;
+
+    /* walk the da list in DM */
+    list_for_each_entry(da, &adev->dm.da_list, list) {
+    if (pvMem == da->cpu_ptr) {
+    amdgpu_bo_free_kernel(&da->bo, &da->gpu_addr, &da->cpu_ptr);
+    list_del(&da->list);
+    kfree(da);
+    break;
+    }
+    }
  }







Re: [PATCH 3/3] drm/amd/display: Skip modeset for front porch change

2021-01-21 Thread Kazlauskas, Nicholas

On 2021-01-19 10:50 a.m., Aurabindo Pillai wrote:

[Why]
A seamless transition between modes can be performed if the new incoming
mode has the same timing parameters as the optimized mode on a display with a
variable vtotal min/max.

Smooth video playback usecases can be enabled with this seamless transition by
switching to a new mode which has a refresh rate matching the video.

[How]
Skip full modeset if userspace requested a compatible freesync mode which only
differs in the front porch timing from the current mode.

Signed-off-by: Aurabindo Pillai 
Acked-by: Christian König 
---
  .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 233 +++---
  .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h |   1 +
  2 files changed, 198 insertions(+), 36 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index aaef2fb528fd..d66494cdd8c8 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -213,6 +213,9 @@ static bool amdgpu_dm_psr_disable_all(struct 
amdgpu_display_manager *dm);
  static const struct drm_format_info *
  amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);
  
+static bool

+is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
+struct drm_crtc_state *new_crtc_state);
  /*
   * dm_vblank_get_counter
   *
@@ -4940,7 +4943,8 @@ static void fill_stream_properties_from_drm_display_mode(
const struct drm_connector *connector,
const struct drm_connector_state *connector_state,
const struct dc_stream_state *old_stream,
-   int requested_bpc)
+   int requested_bpc,
+   bool is_in_modeset)
  {
struct dc_crtc_timing *timing_out = &stream->timing;
const struct drm_display_info *info = &connector->display_info;
@@ -4995,19 +4999,28 @@ static void 
fill_stream_properties_from_drm_display_mode(
timing_out->hdmi_vic = hv_frame.vic;
}
  
-	timing_out->h_addressable = mode_in->crtc_hdisplay;

-   timing_out->h_total = mode_in->crtc_htotal;
-   timing_out->h_sync_width =
-   mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
-   timing_out->h_front_porch =
-   mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
-   timing_out->v_total = mode_in->crtc_vtotal;
-   timing_out->v_addressable = mode_in->crtc_vdisplay;
-   timing_out->v_front_porch =
-   mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
-   timing_out->v_sync_width =
-   mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
-   timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
+   if (is_in_modeset) {
+   timing_out->h_addressable = mode_in->hdisplay;
+   timing_out->h_total = mode_in->htotal;
+   timing_out->h_sync_width = mode_in->hsync_end - 
mode_in->hsync_start;
+   timing_out->h_front_porch = mode_in->hsync_start - 
mode_in->hdisplay;
+   timing_out->v_total = mode_in->vtotal;
+   timing_out->v_addressable = mode_in->vdisplay;
+   timing_out->v_front_porch = mode_in->vsync_start - 
mode_in->vdisplay;
+   timing_out->v_sync_width = mode_in->vsync_end - 
mode_in->vsync_start;
+   timing_out->pix_clk_100hz = mode_in->clock * 10;
+   } else {
+   timing_out->h_addressable = mode_in->crtc_hdisplay;
+   timing_out->h_total = mode_in->crtc_htotal;
+   timing_out->h_sync_width = mode_in->crtc_hsync_end - 
mode_in->crtc_hsync_start;
+   timing_out->h_front_porch = mode_in->crtc_hsync_start - 
mode_in->crtc_hdisplay;
+   timing_out->v_total = mode_in->crtc_vtotal;
+   timing_out->v_addressable = mode_in->crtc_vdisplay;
+   timing_out->v_front_porch = mode_in->crtc_vsync_start - 
mode_in->crtc_vdisplay;
+   timing_out->v_sync_width = mode_in->crtc_vsync_end - 
mode_in->crtc_vsync_start;
+   timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
+   }
+


Not sure if I commented on this last time but I don't really understand 
what this is_in_modeset logic is supposed to be doing here.


We should be modifying crtc_vsync_* for the generated modes, no? Not 
just the vsync_* parameters.
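As an illustration only (not something from this patch): the crtc_* copies are 
normally derived from the user-visible timings via drm_mode_set_crtcinfo() 
before the mode is handed to the driver, roughly:

	/* sketch: populate the crtc_* timing fields for a generated mode;
	 * "new_mode" is a placeholder name, not from the patch
	 */
	struct drm_display_mode new_mode = *mode_in;

	drm_mode_set_crtcinfo(&new_mode, 0);
	/* after this, new_mode.crtc_vdisplay/crtc_vtotal/crtc_vsync_* mirror
	 * the user-visible values (adjusted for interlace/doublescan flags)
	 */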



timing_out->aspect_ratio = get_aspect_ratio(mode_in);
  
  	stream->output_color_space = get_output_color_space(timing_out);

@@ -5227,6 +5240,33 @@ get_highest_refresh_rate_mode(struct amdgpu_dm_connector 
*aconnector,
return m_pref;
  }
  
+static bool is_freesync_video_mode(struct drm_display_mode *mode,

+  struct amdgpu_dm_connector *aconnector)
+{
+   struct drm_display_mode *high_mode;
+   int timing_diff;
+
+   high_mode = get_highest_refresh_rate_mode(aconnector, false);
+   if (!high_mode || !mode)
+   return false;

Re: [PATCH 2/2] drm/amd/display: Fix HDMI deep color output for DCE 6-11.

2021-01-25 Thread Kazlauskas, Nicholas

On 2021-01-25 12:57 p.m., Alex Deucher wrote:

On Thu, Jan 21, 2021 at 1:17 AM Mario Kleiner
 wrote:


This fixes corrupted display output in HDMI deep color
10/12 bpc mode at least as observed on AMD Mullins, DCE-8.3.

It will hopefully also provide fixes for other DCE's up to
DCE-11, assuming those will need similar fixes, but i could
not test that for HDMI due to lack of suitable hw, so viewer
discretion is advised.

dce110_stream_encoder_hdmi_set_stream_attribute() is used for
HDMI setup on all DCE's and is missing color_depth assignment.

dce110_program_pix_clk() is used for pixel clock setup on HDMI
for DCE 6-11, and is missing color_depth assignment.

Additionally some of the underlying Atombios specific encoder
and pixelclock setup functions are missing code which is in
the classic amdgpu kms modesetting path and the in the radeon
kms driver for DCE6/DCE8.

encoder_control_digx_v3() - Was missing setup code wrt. amdgpu
and radeon kms classic drivers. Added here, but untested due to
lack of suitable test hw.

encoder_control_digx_v4() - Added missing setup code.
Successfully tested on AMD mullins / DCE-8.3 with HDMI deep color
output at 10 bpc and 12 bpc.

Note that encoder_control_digx_v5() has proper setup code in place
and is used, e.g., by DCE-11.2, but this code wasn't used for deep
color setup due to the missing cntl.color_depth setup in the calling
function for HDMI.

set_pixel_clock_v5() - Missing setup code wrt. classic amdgpu/radeon
kms. Added here, but untested due to lack of hw.

set_pixel_clock_v6() - Missing setup code added. Successfully tested
on AMD mullins DCE-8.3. This fixes corrupted display output at HDMI
deep color output with 10 bpc or 12 bpc.

Fixes: 4562236b3bc0 ("drm/amd/dc: Add dc display driver (v2)")

Signed-off-by: Mario Kleiner 
Cc: Harry Wentland 


These make sense. I've applied the series.  I'll let the display guys
gauge the other points in your cover letter.

Alex


I don't have any concerns with this patch.

Even though it's already applied feel free to have my:

Reviewed-by: Nicholas Kazlauskas 

Regards,
Nicholas Kazlauskas





---
  .../drm/amd/display/dc/bios/command_table.c   | 61 +++
  .../drm/amd/display/dc/dce/dce_clock_source.c | 14 +
  .../amd/display/dc/dce/dce_stream_encoder.c   |  1 +
  3 files changed, 76 insertions(+)

diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table.c 
b/drivers/gpu/drm/amd/display/dc/bios/command_table.c
index 070459e3e407..afc10b954ffa 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table.c
@@ -245,6 +245,23 @@ static enum bp_result encoder_control_digx_v3(
 cntl->enable_dp_audio);
 params.ucLaneNum = (uint8_t)(cntl->lanes_number);

+   switch (cntl->color_depth) {
+   case COLOR_DEPTH_888:
+   params.ucBitPerColor = PANEL_8BIT_PER_COLOR;
+   break;
+   case COLOR_DEPTH_101010:
+   params.ucBitPerColor = PANEL_10BIT_PER_COLOR;
+   break;
+   case COLOR_DEPTH_121212:
+   params.ucBitPerColor = PANEL_12BIT_PER_COLOR;
+   break;
+   case COLOR_DEPTH_161616:
+   params.ucBitPerColor = PANEL_16BIT_PER_COLOR;
+   break;
+   default:
+   break;
+   }
+
 if (EXEC_BIOS_CMD_TABLE(DIGxEncoderControl, params))
 result = BP_RESULT_OK;

@@ -274,6 +291,23 @@ static enum bp_result encoder_control_digx_v4(
 cntl->enable_dp_audio));
 params.ucLaneNum = (uint8_t)(cntl->lanes_number);

+   switch (cntl->color_depth) {
+   case COLOR_DEPTH_888:
+   params.ucBitPerColor = PANEL_8BIT_PER_COLOR;
+   break;
+   case COLOR_DEPTH_101010:
+   params.ucBitPerColor = PANEL_10BIT_PER_COLOR;
+   break;
+   case COLOR_DEPTH_121212:
+   params.ucBitPerColor = PANEL_12BIT_PER_COLOR;
+   break;
+   case COLOR_DEPTH_161616:
+   params.ucBitPerColor = PANEL_16BIT_PER_COLOR;
+   break;
+   default:
+   break;
+   }
+
 if (EXEC_BIOS_CMD_TABLE(DIGxEncoderControl, params))
 result = BP_RESULT_OK;

@@ -1057,6 +1091,19 @@ static enum bp_result set_pixel_clock_v5(
  * driver choose program it itself, i.e. here we program it
  * to 888 by default.
  */
+   if (bp_params->signal_type == SIGNAL_TYPE_HDMI_TYPE_A)
+   switch (bp_params->color_depth) {
+   case TRANSMITTER_COLOR_DEPTH_30:
+   /* yes this is correct, the atom define is 
wrong */
+   clk.sPCLKInput.ucMiscInfo |= 
PIXEL_CLOCK_V5_MISC_HDMI_32BPP;
+   break;
+   case TRANSMITTER_COLOR_DEPTH_36:
+

Re: [PATCH 3/3] drm/amd/display: Skip modeset for front porch change

2021-02-08 Thread Kazlauskas, Nicholas

On 2021-01-24 11:00 p.m., Aurabindo Pillai wrote:



On 2021-01-21 2:05 p.m., Kazlauskas, Nicholas wrote:

On 2021-01-19 10:50 a.m., Aurabindo Pillai wrote:

[Why]
A seamless transition between modes can be performed if the new incoming
mode has the same timing parameters as the optimized mode on a 
display with a

variable vtotal min/max.

Smooth video playback usecases can be enabled with this seamless 
transition by

switching to a new mode which has a refresh rate matching the video.

[How]
Skip full modeset if userspace requested a compatible freesync mode 
which only

differs in the front porch timing from the current mode.

Signed-off-by: Aurabindo Pillai 
Acked-by: Christian König 
---
  .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 233 +++---
  .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h |   1 +
  2 files changed, 198 insertions(+), 36 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c

index aaef2fb528fd..d66494cdd8c8 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -213,6 +213,9 @@ static bool amdgpu_dm_psr_disable_all(struct 
amdgpu_display_manager *dm);

  static const struct drm_format_info *
  amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);
+static bool
+is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
+ struct drm_crtc_state *new_crtc_state);
  /*
   * dm_vblank_get_counter
   *
@@ -4940,7 +4943,8 @@ static void 
fill_stream_properties_from_drm_display_mode(

  const struct drm_connector *connector,
  const struct drm_connector_state *connector_state,
  const struct dc_stream_state *old_stream,
-    int requested_bpc)
+    int requested_bpc,
+    bool is_in_modeset)
  {
  struct dc_crtc_timing *timing_out = &stream->timing;
  const struct drm_display_info *info = &connector->display_info;
@@ -4995,19 +4999,28 @@ static void 
fill_stream_properties_from_drm_display_mode(

  timing_out->hdmi_vic = hv_frame.vic;
  }
-    timing_out->h_addressable = mode_in->crtc_hdisplay;
-    timing_out->h_total = mode_in->crtc_htotal;
-    timing_out->h_sync_width =
-    mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
-    timing_out->h_front_porch =
-    mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
-    timing_out->v_total = mode_in->crtc_vtotal;
-    timing_out->v_addressable = mode_in->crtc_vdisplay;
-    timing_out->v_front_porch =
-    mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
-    timing_out->v_sync_width =
-    mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
-    timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
+    if (is_in_modeset) {
+    timing_out->h_addressable = mode_in->hdisplay;
+    timing_out->h_total = mode_in->htotal;
+    timing_out->h_sync_width = mode_in->hsync_end - 
mode_in->hsync_start;
+    timing_out->h_front_porch = mode_in->hsync_start - 
mode_in->hdisplay;

+    timing_out->v_total = mode_in->vtotal;
+    timing_out->v_addressable = mode_in->vdisplay;
+    timing_out->v_front_porch = mode_in->vsync_start - 
mode_in->vdisplay;
+    timing_out->v_sync_width = mode_in->vsync_end - 
mode_in->vsync_start;

+    timing_out->pix_clk_100hz = mode_in->clock * 10;
+    } else {
+    timing_out->h_addressable = mode_in->crtc_hdisplay;
+    timing_out->h_total = mode_in->crtc_htotal;
+    timing_out->h_sync_width = mode_in->crtc_hsync_end - 
mode_in->crtc_hsync_start;
+    timing_out->h_front_porch = mode_in->crtc_hsync_start - 
mode_in->crtc_hdisplay;

+    timing_out->v_total = mode_in->crtc_vtotal;
+    timing_out->v_addressable = mode_in->crtc_vdisplay;
+    timing_out->v_front_porch = mode_in->crtc_vsync_start - 
mode_in->crtc_vdisplay;
+    timing_out->v_sync_width = mode_in->crtc_vsync_end - 
mode_in->crtc_vsync_start;

+    timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
+    }
+


Not sure if I commented on this last time but I don't really 
understand what this is_in_modeset logic is supposed to be doing here.


This is because create_stream_for_link(), which ends up calling this 
function, has two callers: one is for stream validation, where the created 
stream is immediately discarded; the other is during modeset. Depending on 
these two cases, we want to copy the right timing parameters. With this 
approach, a major refactor of the upper layers wasn't necessary.


I don't understand why the timing parameters would change between what 
we validated and what we're planning on applying to the hardware. I 
think we should be validating the same thing in

Re: [PATCH] Revert "drm/amd/display: move edp sink present detection to hw init"

2021-02-08 Thread Kazlauskas, Nicholas

On 2021-02-08 2:25 p.m., Anson Jacob wrote:

This reverts commit de6571ecbb88643fa4bb4172e65c12795a2f3124.

Patch causes regression in resume time.


Shouldn't affect any system that has an eDP connector on the board since 
it's expected to be present in end user configuration.


If we want to replicate the same behavior we had before for eDP 
connector + eDP disconnected then we'd want to make sure we're skipping 
the registration for the connector in DM.
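Roughly something like this in the DM link/connector enumeration (sketch only; 
the loop structure is assumed and edp_sink_present is the flag from the 
reverted patch):

	int i;

	for (i = 0; i < dm->dc->link_count; i++) {
		struct dc_link *link = dc_get_link_at_index(dm->dc, i);

		/* eDP connector on the board but no panel attached: skip
		 * creating/registering a DRM connector for it
		 */
		if (link->connector_signal == SIGNAL_TYPE_EDP &&
		    !link->edp_sink_present)
			continue;

		/* ... usual amdgpu_dm_connector init/registration ... */
	}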


Regards,
Nicholas Kazlauskas


---
  drivers/gpu/drm/amd/display/dc/core/dc.c | 40 +++-
  drivers/gpu/drm/amd/display/dc/dc_link.h |  2 --
  2 files changed, 18 insertions(+), 24 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c 
b/drivers/gpu/drm/amd/display/dc/core/dc.c
index c9aede2f783d..8d5378f53243 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -205,9 +205,27 @@ static bool create_links(
link = link_create(&link_init_params);
  
  		if (link) {

+   bool should_destory_link = false;
+
+   if (link->connector_signal == SIGNAL_TYPE_EDP) {
+   if (dc->config.edp_not_connected) {
+   if 
(!IS_DIAG_DC(dc->ctx->dce_environment))
+   should_destory_link = true;
+   } else {
+   enum dc_connection_type type;
+   dc_link_detect_sink(link, &type);
+   if (type == dc_connection_none)
+   should_destory_link = true;
+   }
+   }
+
+   if (dc->config.force_enum_edp || !should_destory_link) {
dc->links[dc->link_count] = link;
link->dc = dc;
++dc->link_count;
+   } else {
+   link_destroy(&link);
+   }
}
}
  
@@ -998,30 +1016,8 @@ struct dc *dc_create(const struct dc_init_data *init_params)

return NULL;
  }
  
-static void detect_edp_presence(struct dc *dc)

-{
-   struct dc_link *edp_link = get_edp_link(dc);
-   bool edp_sink_present = true;
-
-   if (!edp_link)
-   return;
-
-   if (dc->config.edp_not_connected) {
-   edp_sink_present = false;
-   } else {
-   enum dc_connection_type type;
-   dc_link_detect_sink(edp_link, &type);
-   if (type == dc_connection_none)
-   edp_sink_present = false;
-   }
-
-   edp_link->edp_sink_present = edp_sink_present;
-}
-
  void dc_hardware_init(struct dc *dc)
  {
-
-   detect_edp_presence(dc);
if (dc->ctx->dce_environment != DCE_ENV_VIRTUAL_HW)
dc->hwss.init_hw(dc);
  }
diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h 
b/drivers/gpu/drm/amd/display/dc/dc_link.h
index e189f16bc026..d5d8f0ad9233 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_link.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_link.h
@@ -103,8 +103,6 @@ struct dc_link {
bool lttpr_non_transparent_mode;
bool is_internal_display;
  
-	bool edp_sink_present;

-
/* caps is the same as reported_link_cap. link_traing use
 * reported_link_cap. Will clean up.  TODO
 */






Re: [PATCH] Revert "drm/amd/display: reuse current context instead of recreating one"

2021-02-10 Thread Kazlauskas, Nicholas

On 2021-02-10 9:25 a.m., Alex Deucher wrote:

This reverts commit 8866a67ab86cc0812e65c04f1ef02bcc41e24d68.

This breaks hotplug of HDMI on some systems, resulting in
a blank screen.

Bug: https://bugzilla.kernel.org/show_bug.cgi?id=211649
Signed-off-by: Alex Deucher 

---


Hotplug is still working from my side with this patch.

Same with our weekly testing reports that Daniel's been putting out.

This is probably something environment or configuration specific but I 
don't see any logs from the reporter. I'll follow up on the ticket but 
I'd like to understand the problem in more detail before reverting this.


Regards,
Nicholas Kazlauskas


  .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 23 +---
  drivers/gpu/drm/amd/display/dc/core/dc.c  | 27 ++-
  drivers/gpu/drm/amd/display/dc/dc_stream.h|  3 ++-
  3 files changed, 23 insertions(+), 30 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 961abf1cf040..e438baa1adc1 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -1934,7 +1934,7 @@ static void dm_gpureset_commit_state(struct dc_state 
*dc_state,
dc_commit_updates_for_stream(
dm->dc, bundle->surface_updates,
dc_state->stream_status->plane_count,
-   dc_state->streams[k], &bundle->stream_update);
+   dc_state->streams[k], &bundle->stream_update, dc_state);
}
  
  cleanup:

@@ -1965,7 +1965,8 @@ static void dm_set_dpms_off(struct dc_link *link)
  
  	stream_update.stream = stream_state;

dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
-stream_state, &stream_update);
+stream_state, &stream_update,
+stream_state->ctx->dc->current_state);
mutex_unlock(&adev->dm.dc_lock);
  }
  
@@ -7548,7 +7549,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,

struct drm_crtc *pcrtc,
bool wait_for_vblank)
  {
-   int i;
+   uint32_t i;
uint64_t timestamp_ns;
struct drm_plane *plane;
struct drm_plane_state *old_plane_state, *new_plane_state;
@@ -7589,7 +7590,7 @@ static void amdgpu_dm_commit_planes(struct 
drm_atomic_state *state,
amdgpu_dm_commit_cursors(state);
  
  	/* update planes when needed */

-   for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, 
new_plane_state, i) {
+   for_each_oldnew_plane_in_state(state, plane, old_plane_state, 
new_plane_state, i) {
struct drm_crtc *crtc = new_plane_state->crtc;
struct drm_crtc_state *new_crtc_state;
struct drm_framebuffer *fb = new_plane_state->fb;
@@ -7812,7 +7813,8 @@ static void amdgpu_dm_commit_planes(struct 
drm_atomic_state *state,
 bundle->surface_updates,
 planes_count,
 acrtc_state->stream,
-&bundle->stream_update);
+&bundle->stream_update,
+dc_state);
  
  		/**

 * Enable or disable the interrupts on the backend.
@@ -8148,13 +8150,13 @@ static void amdgpu_dm_atomic_commit_tail(struct 
drm_atomic_state *state)
struct dm_connector_state *dm_new_con_state = 
to_dm_connector_state(new_con_state);
struct dm_connector_state *dm_old_con_state = 
to_dm_connector_state(old_con_state);
struct amdgpu_crtc *acrtc = 
to_amdgpu_crtc(dm_new_con_state->base.crtc);
-   struct dc_surface_update surface_updates[MAX_SURFACES];
+   struct dc_surface_update dummy_updates[MAX_SURFACES];
struct dc_stream_update stream_update;
struct dc_info_packet hdr_packet;
struct dc_stream_status *status = NULL;
bool abm_changed, hdr_changed, scaling_changed;
  
-		memset(&surface_updates, 0, sizeof(surface_updates));

+   memset(&dummy_updates, 0, sizeof(dummy_updates));
memset(&stream_update, 0, sizeof(stream_update));
  
  		if (acrtc) {

@@ -8211,15 +8213,16 @@ static void amdgpu_dm_atomic_commit_tail(struct 
drm_atomic_state *state)
 * To fix this, DC should permit updating only stream 
properties.
 */
for (j = 0; j < status->plane_count; j++)
-   surface_updates[j].surface = status->plane_states[j];
+   dummy_updates[j].surface = status->plane_states[0];
  
  
  		mutex_lock

Re: [PATCH 2/2] amd/display: add cursor check for YUV primary plane

2021-02-19 Thread Kazlauskas, Nicholas

On 2021-02-19 11:19 a.m., Simon Ser wrote:

The cursor plane can't be displayed if the primary plane isn't
using an RGB format. Reject such atomic commits so that user-space
can have a fallback instead of an invisible cursor.

In theory we could support YUV if the cursor is also YUV, but at the
moment only ARGB cursors are supported.


Patch 1 looks good, but this patch needs to be adjusted.

We can support cursor plane, but only if we have an overlay plane 
enabled that's using XRGB/ARGB.


This is what we do on Chrome OS for video playback:

Cursor Plane - ARGB
Overlay Plane - ARGB Desktop/UI with cutout for video
Primary Plane - NV12 video

So this new check would break this usecase. It needs to check that there 
isn't an XRGB/ARGB plane at the top of the blending chain instead.
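Something along these lines (rough sketch only; it only sees planes carried in 
this atomic state and relies on normalized_zpos being valid, so treat it as an 
illustration rather than the final check):

	struct drm_plane_state *new_underlying_state = NULL;
	struct drm_plane_state *plane_state;
	struct drm_plane *plane;
	int i;

	/* find the top-most enabled non-cursor plane on this CRTC */
	for_each_new_plane_in_state(state, plane, plane_state, i) {
		if (plane->type == DRM_PLANE_TYPE_CURSOR ||
		    plane_state->crtc != crtc || !plane_state->fb)
			continue;

		if (!new_underlying_state ||
		    plane_state->normalized_zpos >
		    new_underlying_state->normalized_zpos)
			new_underlying_state = plane_state;
	}

	/* reject the cursor only when the plane it gets blended on is YUV */
	if (new_underlying_state && new_underlying_state->fb->format->is_yuv)
		return -EINVAL;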


Regards,
Nicholas Kazlauskas



Signed-off-by: Simon Ser 
Cc: Alex Deucher 
Cc: Harry Wentland 
Cc: Nicholas Kazlauskas 
---
  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 7 +++
  1 file changed, 7 insertions(+)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 4548b779bbce..f659e6cfdfcf 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -9239,6 +9239,13 @@ static int dm_check_crtc_cursor(struct drm_atomic_state 
*state,
return -EINVAL;
}
  
+	/* In theory we could probably support YUV cursors when the primary

+* plane uses a YUV format, but there's no use-case for it yet. */
+   if (new_primary_state->fb && new_primary_state->fb->format->is_yuv) {
+   drm_dbg_atomic(crtc->dev, "Cursor plane can't be used with YUV 
primary plane\n");
+   return -EINVAL;
+   }
+
return 0;
  }
  





Re: [PATCH 2/2] amd/display: add cursor check for YUV primary plane

2021-02-19 Thread Kazlauskas, Nicholas

On 2021-02-19 12:29 p.m., Simon Ser wrote:

On Friday, February 19th, 2021 at 6:22 PM, Kazlauskas, Nicholas 
 wrote:


We can support cursor plane, but only if we have an overlay plane
enabled that's using XRGB/ARGB.

This is what we do on Chrome OS for video playback:

Cursor Plane - ARGB
Overlay Plane - ARGB Desktop/UI with cutout for video
Primary Plane - NV12 video

So this new check would break this usecase. It needs to check that there
isn't an XRGB/ARGB plane at the top of the blending chain instead.


Oh, interesting. I'll adjust the patch.

Related: how does this affect scaling? Right now there is a check that makes
sure the cursor plane scaling matches the primary plane's. Should we instead
check that the cursor plane scaling matches the top-most XRGB/ARGB plane's?



Can't really do scaling on the cursor plane itself. It scales with the 
underlying pipe driving it so it'll only be correct if it matches that.


Primary plane isn't the correct check here since we always use the 
topmost pipe in the blending chain to draw the cursor - in the example I 
gave it'd have to match the overlay plane's scaling, not the primary 
plane's.
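For example (sketch only, declarations omitted; this mirrors the existing 
cursor_scale_* computation in dm_check_crtc_cursor but compares against the 
top-most underlying plane instead of the primary):

	cursor_scale_w = new_cursor_state->crtc_w * 1000 /
			 (new_cursor_state->src_w >> 16);
	cursor_scale_h = new_cursor_state->crtc_h * 1000 /
			 (new_cursor_state->src_h >> 16);

	underlying_scale_w = new_underlying_state->crtc_w * 1000 /
			     (new_underlying_state->src_w >> 16);
	underlying_scale_h = new_underlying_state->crtc_h * 1000 /
			     (new_underlying_state->src_h >> 16);

	/* the cursor inherits the scaling of the pipe it is drawn on, so it
	 * is only correct when it matches that plane's scaling
	 */
	if (cursor_scale_w != underlying_scale_w ||
	    cursor_scale_h != underlying_scale_h)
		return -EINVAL;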


Regards,
Nicholas Kazlauskas



Re: [PATCH] drm/amdgpu: stream's id should reduced after stream destruct

2021-02-22 Thread Kazlauskas, Nicholas

On 2021-02-20 1:30 a.m., ZhiJie.Zhang wrote:

Signed-off-by: ZhiJie.Zhang 
---
  drivers/gpu/drm/amd/display/dc/core/dc_stream.c | 2 ++
  1 file changed, 2 insertions(+)

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c 
b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
index c103f858375d..dc7b7e57a86c 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
@@ -137,6 +137,8 @@ static void dc_stream_destruct(struct dc_stream_state 
*stream)
dc_transfer_func_release(stream->out_transfer_func);
stream->out_transfer_func = NULL;
}
+
+   stream->ctx->dc_stream_id_count--;


This is supposed to be a unique identifier so we shouldn't be reusing 
any old ID when creating a new stream.
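For context, this is roughly what construction does with the counter 
(paraphrased, not a verbatim quote of dc_stream.c), which is why decrementing 
on destruct could hand a still-live ID to the next stream:

	/* roughly what dc_stream_construct() does */
	stream->stream_id = stream->ctx->dc_stream_id_count;
	stream->ctx->dc_stream_id_count++;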


Regards,
Nicholas Kazlauskas


  }
  
  void dc_stream_retain(struct dc_stream_state *stream)






Re: [PATCH] drm/amdgpu/display: fix compilation when CONFIG_DRM_AMD_DC_DCN is not set

2021-02-23 Thread Kazlauskas, Nicholas

On 2021-02-23 10:22 a.m., Alex Deucher wrote:

Missing some CONFIG_DRM_AMD_DC_DCN ifdefs.

Fixes: 9d99a805a9a0 ("drm/amd/display: Fix system hang after multiple hotplugs")
Signed-off-by: Alex Deucher 
Cc: Stephen Rothwell 
Cc: Qingqing Zhuo 


Reviewed-by: Nicholas Kazlauskas 

Regards,
Nicholas Kazlauskas


---
  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 2 ++
  1 file changed, 2 insertions(+)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 7a393eeae4b1..22443e696567 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -5457,12 +5457,14 @@ static inline int dm_set_vblank(struct drm_crtc *crtc, 
bool enable)
if (amdgpu_in_reset(adev))
return 0;
  
+#if defined(CONFIG_DRM_AMD_DC_DCN)

spin_lock_irqsave(&dm->vblank_lock, flags);
dm->vblank_workqueue->dm = dm;
dm->vblank_workqueue->otg_inst = acrtc->otg_inst;
dm->vblank_workqueue->enable = enable;
spin_unlock_irqrestore(&dm->vblank_lock, flags);
schedule_work(&dm->vblank_workqueue->mall_work);
+#endif
  
  	return 0;

  }





Re: [PATCH v6 3/3] drm/amd/display: Skip modeset for front porch change

2021-02-25 Thread Kazlauskas, Nicholas

On 2021-02-12 8:08 p.m., Aurabindo Pillai wrote:

[Why]
A seamless transition between modes can be performed if the new incoming
mode has the same timing parameters as the optimized mode on a display with a
variable vtotal min/max.

Smooth video playback usecases can be enabled with this seamless transition by
switching to a new mode which has a refresh rate matching the video.

[How]
Skip full modeset if userspace requested a compatible freesync mode which only
differs in the front porch timing from the current mode.

Signed-off-by: Aurabindo Pillai 
Acked-by: Christian König 
---
  .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 220 ++
  1 file changed, 180 insertions(+), 40 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index c472905c7d72..628fec855e14 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -212,6 +212,9 @@ static bool amdgpu_dm_psr_disable_all(struct 
amdgpu_display_manager *dm);
  static const struct drm_format_info *
  amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);
  
+static bool

+is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
+struct drm_crtc_state *new_crtc_state);
  /*
   * dm_vblank_get_counter
   *
@@ -335,6 +338,17 @@ static inline bool amdgpu_dm_vrr_active(struct 
dm_crtc_state *dm_state)
   dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
  }
  
+static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,

+ struct dm_crtc_state *new_state)
+{
+   if (new_state->freesync_config.state ==  VRR_STATE_ACTIVE_FIXED)
+   return true;
+   else if (amdgpu_dm_vrr_active(old_state) != 
amdgpu_dm_vrr_active(new_state))
+   return true;
+   else
+   return false;
+}
+
  /**
   * dm_pflip_high_irq() - Handle pageflip interrupt
   * @interrupt_params: ignored
@@ -5008,19 +5022,16 @@ static void 
fill_stream_properties_from_drm_display_mode(
timing_out->hdmi_vic = hv_frame.vic;
}
  
-	timing_out->h_addressable = mode_in->crtc_hdisplay;

-   timing_out->h_total = mode_in->crtc_htotal;
-   timing_out->h_sync_width =
-   mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
-   timing_out->h_front_porch =
-   mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
-   timing_out->v_total = mode_in->crtc_vtotal;
-   timing_out->v_addressable = mode_in->crtc_vdisplay;
-   timing_out->v_front_porch =
-   mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
-   timing_out->v_sync_width =
-   mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
-   timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
+   timing_out->h_addressable = mode_in->hdisplay;
+   timing_out->h_total = mode_in->htotal;
+   timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start;
+   timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay;
+   timing_out->v_total = mode_in->vtotal;
+   timing_out->v_addressable = mode_in->vdisplay;
+   timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay;
+   timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start;
+   timing_out->pix_clk_100hz = mode_in->clock * 10;
+
timing_out->aspect_ratio = get_aspect_ratio(mode_in);
  
  	stream->output_color_space = get_output_color_space(timing_out);

@@ -5240,6 +5251,33 @@ get_highest_refresh_rate_mode(struct amdgpu_dm_connector 
*aconnector,
return m_pref;
  }
  
+static bool is_freesync_video_mode(struct drm_display_mode *mode,

+  struct amdgpu_dm_connector *aconnector)
+{
+   struct drm_display_mode *high_mode;
+   int timing_diff;
+
+   high_mode = get_highest_refresh_rate_mode(aconnector, false);
+   if (!high_mode || !mode)
+   return false;
+
+   timing_diff = high_mode->vtotal - mode->vtotal;
+
+   if (high_mode->clock == 0 || high_mode->clock != mode->clock ||
+   high_mode->hdisplay != mode->hdisplay ||
+   high_mode->vdisplay != mode->vdisplay ||
+   high_mode->hsync_start != mode->hsync_start ||
+   high_mode->hsync_end != mode->hsync_end ||
+   high_mode->htotal != mode->htotal ||
+   high_mode->hskew != mode->hskew ||
+   high_mode->vscan != mode->vscan ||
+   high_mode->vsync_start - mode->vsync_start != timing_diff ||
+   high_mode->vsync_end - mode->vsync_end != timing_diff)
+   return false;
+   else
+   return true;
+}
+
  static struct dc_stream_state *
  create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
   const struct drm_display_mode *drm_mode,
@@ -5253,8 +5291,10 @@

Re: [PATCH 3/6] amd/display: fail on cursor plane without an underlying plane

2021-03-04 Thread Kazlauskas, Nicholas

On 2021-03-04 4:05 a.m., Michel Dänzer wrote:

On 2021-03-03 8:17 p.m., Daniel Vetter wrote:

On Wed, Mar 3, 2021 at 5:53 PM Michel Dänzer  wrote:


On 2021-02-19 7:58 p.m., Simon Ser wrote:

Make sure there's an underlying pipe that can be used for the
cursor.

Signed-off-by: Simon Ser 
Cc: Alex Deucher 
Cc: Harry Wentland 
Cc: Nicholas Kazlauskas 
---
   drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 7 ++-
   1 file changed, 6 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c

index acbe1537e7cf..a5d6010405bf 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -9226,9 +9226,14 @@ static int dm_check_crtc_cursor(struct 
drm_atomic_state *state,

   }

   new_cursor_state = drm_atomic_get_new_plane_state(state, 
crtc->cursor);
- if (!new_cursor_state || !new_underlying_state || 
!new_cursor_state->fb)

+ if (!new_cursor_state || !new_cursor_state->fb)
   return 0;

+ if (!new_underlying_state || !new_underlying_state->fb) {
+ drm_dbg_atomic(crtc->dev, "Cursor plane can't be 
enabled without underlying plane\n");

+ return -EINVAL;
+ }
+
   cursor_scale_w = new_cursor_state->crtc_w * 1000 /
    (new_cursor_state->src_w >> 16);
   cursor_scale_h = new_cursor_state->crtc_h * 1000 /



Houston, we have a problem I'm afraid. Adding Daniel.


If the primary plane is enabled with a format which isn't compatible 
with the HW cursor,
and no overlay plane is enabled, the same issues as described in 
b836a274b797
"drm/amdgpu/dc: Require primary plane to be enabled whenever the CRTC 
is" can again occur:



* The legacy cursor ioctl fails with EINVAL for a non-0 cursor FB ID
  (which enables the cursor plane).

* If the cursor plane is enabled (e.g. using the legacy cursor ioctl
  during DPMS off), changing the legacy DPMS property value from off to
  on fails with EINVAL.


atomic_check should still be run when the crtc is off, so the legacy
cursor ioctl should fail when dpms off in this case already.


Good point. This could already be problematic though. E.g. mutter treats
EINVAL from the cursor ioctl as the driver not supporting HW cursors at
all, so it falls back to SW cursor and never tries to use the HW cursor
again. (I don't think mutter could hit this particular case with an
incompatible format though, but there might be other similar user space)



Moreover, in the same scenario plus an overlay plane enabled with a
HW cursor compatible format, if the FB bound to the overlay plane is
destroyed, the common DRM code will attempt to disable the overlay
plane, but dm_check_crtc_cursor will reject that now. I can't remember
exactly what the result is, but AFAIR it's not pretty.


CRTC gets disabled instead. That's why we went with the "always
require primary plane" hack. I think the only solution here would be
to enable the primary plane (but not in userspace-visible state, so
this needs to be done in the dc derived state objects only) that scans
out black any time we're in such a situation with cursor with no
planes.


This is about a scenario described by Nicholas earlier:

Cursor Plane - ARGB

Overlay Plane - ARGB Desktop/UI with cutout for video

Primary Plane - NV12 video

And destroying the FB bound to the overlay plane. The fallback to disable
the CRTC in atomic_remove_fb only kicks in for the primary plane, so it
wouldn't in this case and would fail. Which would in turn trigger the
WARN in drm_framebuffer_remove (and leave the overlay plane scanning out
from freed memory?).


The cleanest solution might be not to allow any formats incompatible with
the HW cursor for the primary plane.




Legacy X userspace doesn't use overlays but Chrome OS does.

This would regress ChromeOS MPO support because it relies on the NV12 
video plane being on the bottom.


When ChromeOS disables MPO it doesn't do it plane by plane, it does it 
in one go from NV12+ARGB -> ARGB8.


Regards,
Nicholas Kazlauskas


Re: [PATCH 3/6] amd/display: fail on cursor plane without an underlying plane

2021-03-04 Thread Kazlauskas, Nicholas

On 2021-03-04 10:35 a.m., Michel Dänzer wrote:

On 2021-03-04 4:09 p.m., Kazlauskas, Nicholas wrote:

On 2021-03-04 4:05 a.m., Michel Dänzer wrote:

On 2021-03-03 8:17 p.m., Daniel Vetter wrote:

On Wed, Mar 3, 2021 at 5:53 PM Michel Dänzer  wrote:


Moreover, in the same scenario plus an overlay plane enabled with a
HW cursor compatible format, if the FB bound to the overlay plane is
destroyed, the common DRM code will attempt to disable the overlay
plane, but dm_check_crtc_cursor will reject that now. I can't remember
exactly what the result is, but AFAIR it's not pretty.


CRTC gets disabled instead. That's why we went with the "always
require primary plane" hack. I think the only solution here would be
to enable the primary plane (but not in userspace-visible state, so
this needs to be done in the dc derived state objects only) that scans
out black any time we're in such a situation with cursor with no
planes.


This is about a scenario described by Nicholas earlier:

Cursor Plane - ARGB

Overlay Plane - ARGB Desktop/UI with cutout for video

Primary Plane - NV12 video

And destroying the FB bound to the overlay plane. The fallback to disable
the CRTC in atomic_remove_fb only kicks in for the primary plane, so it
wouldn't in this case and would fail. Which would in turn trigger the
WARN in drm_framebuffer_remove (and leave the overlay plane scanning out
from freed memory?).


The cleanest solution might be not to allow any formats incompatible with
the HW cursor for the primary plane.


Legacy X userspace doesn't use overlays but Chrome OS does.

This would regress ChromeOS MPO support because it relies on the NV12
video plane being on the bottom.


Could it use the NV12 overlay plane below the ARGB primary plane?


Plane ordering was previously undefined in DRM so we have userspace that 
assumes overlays are on top.


Today we have the z-order property in DRM that defines where it is in 
the stack, so technically it could but we'd also be regressing existing 
behavior on Chrome OS today.
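For reference, a sketch of the DRM side (placeholder variables): plane 
stacking is advertised through the zpos property at plane init, either 
immutable or as a range userspace may change:

	/* fixed position in the stack */
	ret = drm_plane_create_zpos_immutable_property(plane, zpos);

	/* or: allow userspace to restack planes between 0 and max_zpos */
	ret = drm_plane_create_zpos_property(plane, zpos, 0, max_zpos);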






When ChromeOS disables MPO it doesn't do it plane by plane, it does it
in one go from NV12+ARGB -> ARGB8.


Even so, we cannot expect all user space to do the same, and we cannot
allow any user space to trigger a WARN and scanout from freed memory.




The WARN doesn't trigger because there's still a reference on the FB - 
the reference held by DRM since it's still scanning out the overlay. 
Userspace can't reclaim this memory with another buffer allocation 
because it's still in use.
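Illustrative only: the reference in question is the ordinary framebuffer 
refcount DRM takes when the plane state picks up the FB, so RMFB can't free 
memory that is still being scanned out:

	drm_framebuffer_get(fb);	/* taken when the plane state takes the FB */
	/* ... overlay keeps scanning out ... */
	drm_framebuffer_put(fb);	/* dropped only when that state is destroyed */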


It's a little odd that a disable commit can fail, but I don't think 
there's anything in DRM core that specifies that this can't happen for 
planes.


Regards,
Nicholas Kazlauskas


Re: [PATCH 2/4] drm/amdgpu/display: don't assert in set backlight function

2021-03-04 Thread Kazlauskas, Nicholas

On 2021-03-04 12:41 p.m., Alex Deucher wrote:

It just spams the logs.

Signed-off-by: Alex Deucher 


This series in general looks reasonable to me:
Reviewed-by: Nicholas Kazlauskas 


---
  drivers/gpu/drm/amd/display/dc/core/dc_link.c | 1 -
  1 file changed, 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c 
b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index fa9a62dc174b..974b70f21837 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -2614,7 +2614,6 @@ bool dc_link_set_backlight_level(const struct dc_link 
*link,
if (pipe_ctx->plane_state == NULL)
frame_ramp = 0;
} else {
-   ASSERT(false);


Just a comment on what's actually going on here with this warning:

Technically we can't apply the backlight level without a plane_state in 
the context but the panel is also off anyway.


I think there might be a bug here when the panel turns on and we're not 
applying values set when it was off but I don't think anyone's reported 
this as an issue.


I'm not entirely sure if the value gets cached and reapplied with the 
correct value later, but it's something to keep in mind.
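A minimal sketch of the kind of caching meant here (all names are 
hypothetical, not taken from any actual patch):

	/* hypothetical cache in amdgpu_display_manager */
	dm->cached_backlight_level = level;

	/* this only reaches the panel while a plane/stream is active */
	dc_link_set_backlight_level(link, level, 0);

	/* hypothetical hook on stream/plane enable: replay the cached value */
	dc_link_set_backlight_level(link, dm->cached_backlight_level, 0);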


Regards,
Nicholas Kazlauskas


return false;
}
  





Re: [PATCH 2/4] drm/amdgpu/display: don't assert in set backlight function

2021-03-04 Thread Kazlauskas, Nicholas

On 2021-03-04 1:41 p.m., Alex Deucher wrote:

On Thu, Mar 4, 2021 at 1:33 PM Kazlauskas, Nicholas
 wrote:


On 2021-03-04 12:41 p.m., Alex Deucher wrote:

It just spams the logs.

Signed-off-by: Alex Deucher 


This series in general looks reasonable to me:
Reviewed-by: Nicholas Kazlauskas 


---
   drivers/gpu/drm/amd/display/dc/core/dc_link.c | 1 -
   1 file changed, 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c 
b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index fa9a62dc174b..974b70f21837 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -2614,7 +2614,6 @@ bool dc_link_set_backlight_level(const struct dc_link 
*link,
   if (pipe_ctx->plane_state == NULL)
   frame_ramp = 0;
   } else {
- ASSERT(false);


Just a comment on what's actually going on here with this warning:

Technically we can't apply the backlight level without a plane_state in
the context but the panel is also off anyway.

I think there might be a bug here when the panel turns on and we're not
applying values set when it was off but I don't think anyone's reported
this as an issue.

I'm not entirely sure if the value gets cached and reapplied with the
correct value later, but it's something to keep in mind.


It doesn't.  I have additional patches here to cache it:
https://cgit.freedesktop.org/~agd5f/linux/log/?h=backlight_wip

Alex


That's aligned with my expectations then.

I can take a peek at the branch and help review the patches.

Regards,
Nicholas Kazlauskas





Regards,
Nicholas Kazlauskas


   return false;
   }








Re: [PATCH 3/6] amd/display: fail on cursor plane without an underlying plane

2021-03-08 Thread Kazlauskas, Nicholas

On 2021-03-08 3:18 p.m., Daniel Vetter wrote:

On Fri, Mar 5, 2021 at 10:24 AM Michel Dänzer  wrote:


On 2021-03-04 7:26 p.m., Kazlauskas, Nicholas wrote:

On 2021-03-04 10:35 a.m., Michel Dänzer wrote:

On 2021-03-04 4:09 p.m., Kazlauskas, Nicholas wrote:

On 2021-03-04 4:05 a.m., Michel Dänzer wrote:

On 2021-03-03 8:17 p.m., Daniel Vetter wrote:

On Wed, Mar 3, 2021 at 5:53 PM Michel Dänzer 
wrote:


Moreover, in the same scenario plus an overlay plane enabled with a
HW cursor compatible format, if the FB bound to the overlay plane is
destroyed, the common DRM code will attempt to disable the overlay
plane, but dm_check_crtc_cursor will reject that now. I can't
remember
exactly what the result is, but AFAIR it's not pretty.


CRTC gets disabled instead. That's why we went with the "always
require primary plane" hack. I think the only solution here would be
to enable the primary plane (but not in userspace-visible state, so
this needs to be done in the dc derived state objects only) that scans
out black any time we're in such a situation with cursor with no
planes.


This is about a scenario described by Nicholas earlier:

Cursor Plane - ARGB

Overlay Plane - ARGB Desktop/UI with cutout for video

Primary Plane - NV12 video

And destroying the FB bound to the overlay plane. The fallback to
disable
the CRTC in atomic_remove_fb only kicks in for the primary plane, so it
wouldn't in this case and would fail. Which would in turn trigger the
WARN in drm_framebuffer_remove (and leave the overlay plane scanning
out
from freed memory?).


The cleanest solution might be not to allow any formats incompatible
with
the HW cursor for the primary plane.


Legacy X userspace doesn't use overlays but Chrome OS does.

This would regress ChromeOS MPO support because it relies on the NV12
video plane being on the bottom.


Could it use the NV12 overlay plane below the ARGB primary plane?


Plane ordering was previously undefined in DRM so we have userspace that
assumes overlays are on top.


They can still be by default?


Today we have the z-order property in DRM that defines where it is in
the stack, so technically it could but we'd also be regressing existing
behavior on Chrome OS today.


That's unfortunate, but might be the least bad choice overall.

BTW, doesn't Chrome OS try to disable the ARGB overlay plane while there are no 
UI elements to display? If it does, this series might break it anyway (if the 
cursor plane can be enabled while the ARGB overlay plane is off).



When ChromeOS disables MPO it doesn't do it plane by plane, it does it
in one go from NV12+ARGB -> ARGB8.


Even so, we cannot expect all user space to do the same, and we cannot
allow any user space to trigger a WARN and scanout from freed memory.


The WARN doesn't trigger because there's still a reference on the FB -


The WARN triggers if atomic_remove_fb returns an error, which is the case if it can't 
disable an overlay plane. I actually hit this with IGT tests while working on 
b836a274b797 "drm/amdgpu/dc: Require primary plane to be enabled whenever the CRTC 
is" (I initially tried allowing the cursor plane to be enabled together with an 
overlay plane while the primary plane is off).


the reference held by DRM since it's still scanning out the overlay.
Userspace can't reclaim this memory with another buffer allocation
because it's still in use.


Good point, so at least there's no scanout of freed memory. Even so, the 
overlay plane continues displaying contents which user space apparently doesn't 
want to be displayed anymore.


Hm I do wonder how much we need to care for this. If you use planes,
you better use TEST_ONLY in atomic to its full extent (including
cursor, if that's a real plane, which it is for every driver except
msm/mdp4). If userspace screws this up and, worse, shuts off planes with
an RMFB, I think it's not entirely unreasonable to claim that it
should keep the pieces.

So maybe we should refine the WARN_ON to not trigger if other planes
than crtc->primary and crtc->cursor are enabled right now?


It's a little odd that a disable commit can fail, but I don't think
there's anything in DRM core that specifies that this can't happen for
planes.


I'd say it's more than just a little odd. :) Being unable to disable an overlay 
plane seems very surprising, and could make it tricky for user space (not to 
mention core DRM code like atomic_remove_fb) to find a solution.

I'd suggest the amdgpu DM code should rather virtualize the KMS API planes 
somehow such that an overlay plane can always be disabled. While this might 
incur some short-term pain, it will likely save more pain overall in the long 
term.


Yeah I think this amd dc cursor problem is the first case where
removing a plane can make things worse.

Since the hw is what it is, ca

Re: [PATCH 2/2] drm/amd/display: Enable fp16 also on DCE-11.0 - DCE-12.

2020-05-20 Thread Kazlauskas, Nicholas

On 2020-05-15 1:19 a.m., Mario Kleiner wrote:

Testing on a Polaris11 gpu with DCE-11.2 suggests that it
seems to work fine there, so optimistically enable it for
DCE-11 and later.

Signed-off-by: Mario Kleiner 


Series is:

Reviewed-by: Nicholas Kazlauskas 

Thanks!


---
  drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c | 2 +-
  drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c | 2 +-
  drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c | 2 +-
  3 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c 
b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
index 9597fc79d7fa..a043ddae5149 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
@@ -410,7 +410,7 @@ static const struct dc_plane_cap plane_cap = {
.pixel_format_support = {
.argb = true,
.nv12 = false,
-   .fp16 = false
+   .fp16 = true
},
  
  		.max_upscale_factor = {

diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c 
b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
index 4a7796de2ff5..51b3fe502670 100644
--- a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
@@ -411,7 +411,7 @@ static const struct dc_plane_cap plane_cap = {
.pixel_format_support = {
.argb = true,
.nv12 = false,
-   .fp16 = false
+   .fp16 = true
},
  
  	.max_upscale_factor = {

diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c 
b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
index 9a9764cbd78d..8f362e8c1787 100644
--- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
@@ -516,7 +516,7 @@ static const struct dc_plane_cap plane_cap = {
.pixel_format_support = {
.argb = true,
.nv12 = false,
-   .fp16 = false
+   .fp16 = true
},
  
  	.max_upscale_factor = {






Re: [PATCH] drm/amd/display: Handle GPU reset for DC block

2020-05-20 Thread Kazlauskas, Nicholas

On 2020-05-20 11:29 a.m., Bhawanpreet Lakha wrote:

[Why]
Previously we used the s3 codepath for gpu reset. This can lead to issues in
certain case where we end of waiting for fences which will never come (because
parts of the hw are off due to gpu reset) and we end up waiting forever causing
a deadlock.

[How]
Handle GPU reset separately from normal s3 case. We essentially need to redo
everything we do in s3, but avoid any drm calls.

For GPU reset case

suspend:
-Acquire DC lock
-Cache current dc_state
-Commit 0 stream/planes to dc (this puts dc into a state where it can be
 powered off)
-Disable interrupts
resume
-Edit cached state to force full update
-Commit cached state from suspend
-Build stream and plane updates from the cached state
-Commit stream/plane updates
-Enable interrupts
-Release DC lock


Some comments inline below, but mostly looks good.




Signed-off-by: Bhawanpreet Lakha 
---
  .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 175 +-
  .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h |   1 +
  2 files changed, 175 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 60fe64aef11b..46bb6e156f81 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -1521,10 +1521,109 @@ static int dm_hw_fini(void *handle)
return 0;
  }
  
+

+static int dm_enable_vblank(struct drm_crtc *crtc);
+static void dm_disable_vblank(struct drm_crtc *crtc);
+
+static void dm_gpureset_interrupt(struct amdgpu_device *adev,
+struct dc_state *state, bool enable)
+{


dm_gpureset_toggle_interrupts() might be more clear since this isn't an 
interrupt handler.



+   enum dc_irq_source irq_source;
+   struct amdgpu_crtc *acrtc;
+   int rc = -EBUSY;
+   int i = 0;
+
+   for (i = 0; i < state->stream_count; i++) {
+   acrtc = get_crtc_by_otg_inst(
+   adev, state->stream_status[i].primary_otg_inst);
+
+   if (acrtc && state->stream_status[i].plane_count != 0) {
+   irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
+   rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) 
? 0 : -EBUSY;
+   DRM_DEBUG("crtc %d - vupdate irq %sabling: r=%d\n",
+ acrtc->crtc_id, enable ? "en" : "dis", rc);
+   if (rc)
+   DRM_WARN("Failed to %s pflip interrupts\n",
+enable ? "enable" : "disable");
+
+   if (enable){


Style nitpick, should be if (enable) {


+   rc = dm_enable_vblank(&acrtc->base);
+   if (rc)
+   DRM_WARN("Failed to enable vblank 
interrupts\n");
+   } else


Let's keep the } else {

}

here since we're already using on the if above.


+   dm_disable_vblank(&acrtc->base);
+
+   }
+   }
+
+}
+
+enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
+{
+   struct dc_state *context = NULL;
+   enum dc_status res = DC_ERROR_UNEXPECTED;
+   int i;
+   struct dc_stream_state *del_streams[MAX_PIPES] = { 0 };


Let's use memset for this rather than = { 0 }; some compilers complain.


+   int del_streams_count = 0;
+
+   context = dc_create_state(dc);
+   if (context == NULL)
+   goto context_alloc_fail;
+
+   dc_resource_state_copy_construct_current(dc, context);
+
+   /* First remove from context all streams */
+   for (i = 0; i < context->stream_count; i++) {
+   struct dc_stream_state *stream = context->streams[i];


Need an extra blank line here.


+   del_streams[del_streams_count++] = stream;
+   }
+
+   /* Remove all planes for removed streams and then remove the streams */
+   for (i = 0; i < del_streams_count; i++) {
+   if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) 
{
+   res = DC_FAIL_DETACH_SURFACES;
+   goto fail;
+   }
+
+   res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
+   if (res != DC_OK)
+   goto fail;
+   }
+
+
+   res = dc_validate_global_state(dc, context, false);
+
+   if (res != DC_OK) {
+   DRM_ERROR("%s:resource validation failed, dc_status:%d\n", 
__func__, res);
+   goto fail;
+   }
+
+   res = dc_commit_state(dc, context);
+
+fail:
+   dc_release_state(context);
+
+context_alloc_fail:
+   return res;
+}
+
  static int dm_suspend(void *handle)
  {
struct amdgpu_device *adev = handle;
   struct amdgpu_display_manager *dm = &adev->dm;

Re: [PATCH 2/2] drm/amd/display: Enable fp16 also on DCE-11.0 - DCE-12.

2020-05-20 Thread Kazlauskas, Nicholas

On 2020-05-20 2:44 p.m., Mario Kleiner wrote:
On Wed, May 20, 2020 at 8:25 PM Alex Deucher > wrote:


On Wed, May 20, 2020 at 12:39 PM Harry Wentland mailto:hwent...@amd.com>> wrote:
 >
 > On 2020-05-15 1:19 a.m., Mario Kleiner wrote:
 > > Testing on a Polaris11 gpu with DCE-11.2 suggests that it
 > > seems to work fine there, so optimistically enable it for
 > > DCE-11 and later.
 > >
 > > Signed-off-by: Mario Kleiner mailto:mario.kleiner...@gmail.com>>
 > > ---
 > >  drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c | 2 +-
 > >  drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c | 2 +-
 > >  drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c | 2 +-
 > >  3 files changed, 3 insertions(+), 3 deletions(-)
 > >
 > > diff --git
a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
 > > index 9597fc79d7fa..a043ddae5149 100644
 > > --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
 > > +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
 > > @@ -410,7 +410,7 @@ static const struct dc_plane_cap plane_cap = {
 > >               .pixel_format_support = {
 > >                               .argb = true,
 > >                               .nv12 = false,
 > > -                             .fp16 = false
 > > +                             .fp16 = true
 >
 > Carrizo (DCE 11.0) has a HW bug where FP16 scaling doesn't work. I
 > recommend we leave it off here.

I'll drop this hunk for upstream.

Alex


Ok, no fixup patch needed from myself, thanks Alex. Does the scaling bug 
refer to scaling the planes (those max_downscale_factor / 
max_upscale_factor definitions seem to be unused) or the fp16 values itself?


What about DCE 8 and DCE 10 hw capabilities wrt. fp16? Should i send 
fp16 enable patches for those as well?


-mario


Yeah, the upscale and downscale factors were intended to block FP16 scaling 
from being accepted and reject the commit, but I guess nobody ever added 
those checks to atomic check.
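A sketch of what wiring that into atomic check could look like (the helper 
usage and the mapping of the DC caps onto min/max scale are assumptions, not 
existing code; declarations of fb/new_plane_state/new_crtc_state omitted):

	int min_scale = 1;		/* allow scaling by default */
	int max_scale = INT_MAX;

	/* FP16 framebuffer on hardware whose caps disallow FP16 scaling:
	 * pin the allowed scale to 1:1 and let the helper reject the rest
	 */
	if (fb && (fb->format->format == DRM_FORMAT_XRGB16161616F ||
		   fb->format->format == DRM_FORMAT_ARGB16161616F)) {
		min_scale = DRM_PLANE_HELPER_NO_SCALING;
		max_scale = DRM_PLANE_HELPER_NO_SCALING;
	}

	return drm_atomic_helper_check_plane_state(new_plane_state, new_crtc_state,
						   min_scale, max_scale,
						   true, true);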


I reviewed the patch with the idea in mind that we already blocked this 
on a DC level. We can re-enable it in the caps after this is in, I think.


Off the top of my head I don't remember what DCE8/DCE10 supports, but 
I'm also not sure if they even support sending the SDP message for those 
to really be usable.


Regards,
Nicholas Kazlauskas



 >
 > Harry
 >
 > >               },
 > >
 > >               .max_upscale_factor = {
 > > diff --git
a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
 > > index 4a7796de2ff5..51b3fe502670 100644
 > > --- a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
 > > +++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
 > > @@ -411,7 +411,7 @@ static const struct dc_plane_cap plane_cap = {
 > >       .pixel_format_support = {
 > >                       .argb = true,
 > >                       .nv12 = false,
 > > -                     .fp16 = false
 > > +                     .fp16 = true
 > >       },
 > >
 > >       .max_upscale_factor = {
 > > diff --git
a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
 > > index 9a9764cbd78d..8f362e8c1787 100644
 > > --- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
 > > +++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
 > > @@ -516,7 +516,7 @@ static const struct dc_plane_cap plane_cap = {
 > >       .pixel_format_support = {
 > >                       .argb = true,
 > >                       .nv12 = false,
 > > -                     .fp16 = false
 > > +                     .fp16 = true
 > >       },
 > >
 > >       .max_upscale_factor = {
 > >






Re: [PATCH] drm/amd/display: Handle GPU reset for DC block

2020-05-22 Thread Kazlauskas, Nicholas

On 2020-05-22 10:45 a.m., Alex Deucher wrote:

On Thu, May 21, 2020 at 5:39 PM Bhawanpreet Lakha
 wrote:


[Why]
Previously we used the s3 codepath for gpu reset. This can lead to issues in
certain case where we end of waiting for fences which will never come (because
parts of the hw are off due to gpu reset) and we end up waiting forever causing
a deadlock.

[How]
Handle GPU reset separately from normal s3 case. We essentially need to redo
everything we do in s3, but avoid any drm calls.

For GPU reset case

suspend:
 -Acquire DC lock
 -Cache current dc_state
 -Commit 0 stream/planes to dc (this puts dc into a state where it can 
be
  powered off)
 -Disable interrupts
resume
 -Edit cached state to force full update
 -Commit cached state from suspend
 -Build stream and plane updates from the cached state
 -Commit stream/plane updates
 -Enable interrupts
 -Release DC lock

v2:
-Formatting
-Release dc_state

Signed-off-by: Bhawanpreet Lakha 


Acked-by: Alex Deucher 


Looks good to me now.

Reviewed-by: Nicholas Kazlauskas 

Regards,
Nicholas Kazlauskas




---
  .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 182 +-
  .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h |   1 +
  2 files changed, 182 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 60fe64aef11b..4110ff8580b7 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -1521,10 +1521,114 @@ static int dm_hw_fini(void *handle)
 return 0;
  }

+
+static int dm_enable_vblank(struct drm_crtc *crtc);
+static void dm_disable_vblank(struct drm_crtc *crtc);
+
+static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
+struct dc_state *state, bool enable)
+{
+   enum dc_irq_source irq_source;
+   struct amdgpu_crtc *acrtc;
+   int rc = -EBUSY;
+   int i = 0;
+
+   for (i = 0; i < state->stream_count; i++) {
+   acrtc = get_crtc_by_otg_inst(
+   adev, state->stream_status[i].primary_otg_inst);
+
+   if (acrtc && state->stream_status[i].plane_count != 0) {
+   irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
+   rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) 
? 0 : -EBUSY;
+   DRM_DEBUG("crtc %d - vupdate irq %sabling: r=%d\n",
+ acrtc->crtc_id, enable ? "en" : "dis", rc);
+   if (rc)
+   DRM_WARN("Failed to %s pflip interrupts\n",
+enable ? "enable" : "disable");
+
+   if (enable) {
+   rc = dm_enable_vblank(&acrtc->base);
+   if (rc)
+   DRM_WARN("Failed to enable vblank 
interrupts\n");
+   } else {
+   dm_disable_vblank(&acrtc->base);
+   }
+
+   }
+   }
+
+}
+
+enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
+{
+   struct dc_state *context = NULL;
+   enum dc_status res = DC_ERROR_UNEXPECTED;
+   int i;
+   struct dc_stream_state *del_streams[MAX_PIPES];
+   int del_streams_count = 0;
+
+   memset(del_streams, 0, sizeof(del_streams));
+
+   context = dc_create_state(dc);
+   if (context == NULL)
+   goto context_alloc_fail;
+
+   dc_resource_state_copy_construct_current(dc, context);
+
+   /* First remove from context all streams */
+   for (i = 0; i < context->stream_count; i++) {
+   struct dc_stream_state *stream = context->streams[i];
+
+   del_streams[del_streams_count++] = stream;
+   }
+
+   /* Remove all planes for removed streams and then remove the streams */
+   for (i = 0; i < del_streams_count; i++) {
+   if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) 
{
+   res = DC_FAIL_DETACH_SURFACES;
+   goto fail;
+   }
+
+   res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
+   if (res != DC_OK)
+   goto fail;
+   }
+
+
+   res = dc_validate_global_state(dc, context, false);
+
+   if (res != DC_OK) {
+   DRM_ERROR("%s:resource validation failed, dc_status:%d\n", 
__func__, res);
+   goto fail;
+   }
+
+   res = dc_commit_state(dc, context);
+
+fail:
+   dc_release_state(context);
+
+context_alloc_fail:
+   return res;
+}
+
  static int dm_suspend(void *handle)
  {
 struct amdgpu_device *adev = handle;
 struct amdgpu_display_manager *dm = &adev->dm;
+   int ret = 0;
+
+   if (adev

Re: [PATCH] drm/amdgpu/display: fix logic inversion in program_timing_sync()

2020-05-28 Thread Kazlauskas, Nicholas

I still think we should just drop the reduction loop.

The problem with checking plane_state at all is that this logic will 
always be broken - plane_state isn't a good indicator of whether the 
stream is blanked or not since we can leave an OTG running with no 
planes at all while unblanked.


Regards,
Nicholas Kazlauskas

On 2020-05-27 4:51 p.m., Alex Deucher wrote:

Can we apply this for now until we can get further analysis on the
actual root cause?

Alex

On Mon, Apr 6, 2020 at 10:44 AM Alex Deucher  wrote:


Ping again?

Alex

On Thu, Feb 20, 2020 at 8:27 AM Alex Deucher  wrote:


On Tue, Feb 4, 2020 at 9:06 AM Kazlauskas, Nicholas
 wrote:


Comments inline.

On 2020-02-03 4:07 p.m., Alex Deucher wrote:

Ping?

On Fri, Jan 10, 2020 at 3:11 PM Alex Deucher  wrote:


It looks like we should be reducing the group size when we don't
have a plane rather than when we do.

Bug: https://gitlab.freedesktop.org/drm/amd/issues/781
Fixes: 5fc0cbfad45648 ("drm/amd/display: determine if a pipe is synced by plane 
state")
Signed-off-by: Alex Deucher 
---
   drivers/gpu/drm/amd/display/dc/core/dc.c | 4 ++--
   1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c 
b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 3d89904003f0..01b27726d9c5 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -1003,9 +1003,9 @@ static void program_timing_sync(
  status->timing_sync_info.master = false;

  }
-   /* remove any other pipes with plane as they have already been 
synced */
+   /* remove any other pipes without plane as they have already 
been synced */


This took a while to wrap my head around but I think I understand what
this was originally trying to do.

The original logic seems to have been checking for blanked streams and
trying to remove anything that was blanked from the group to try and
avoid having to enable timing synchronization.

However, the logic for blanked is *not* the same as having a
plane_state. Technically you can drive an OTG without anything connected
in the front end and it'll just draw out the back color - which is
distinct from having the OTG be blanked.

The problem is really this iteration below:


  for (j = j + 1; j < group_size; j++) {


There could still be pipes in here (depending on the ordering) that have
planes and could be synchronized with the master OTG. I think starting
at j + 1 is a mistake for this logic as well.

I wonder if we can just drop this loop altogether. If we add planes or
unblank the OTG later then we'll still want the synchronization.

Dmytro, Wenjing - feel free to correct my understanding if I'm mistaken
about this.


Ping?  Any thoughts on this?  It would be nice to get this fixed.

Alex




Regards,
Nicholas Kazlauskas


-   if (pipe_set[j]->plane_state) {
+   if (!pipe_set[j]->plane_state) {
  group_size--;
  pipe_set[j] = pipe_set[group_size];
  j--;
--
2.24.1




Re: [PATCH] Revert "drm/amd/display: disable dcn20 abm feature for bring up"

2020-05-28 Thread Kazlauskas, Nicholas

On 2020-05-28 10:08 a.m., Alex Deucher wrote:

On Thu, May 28, 2020 at 9:47 AM Harry Wentland  wrote:


This reverts commit 96cb7cf13d8530099c256c053648ad576588c387.

This change was used for DCN2 bringup and is no longer desired.
In fact it breaks backlight on DCN2 systems.

Cc: Alexander Monakov 
Cc: Hersen Wu 
Cc: Anthony Koo 
Cc: Michael Chiu 
Signed-off-by: Harry Wentland 


Acked-by: Alex Deucher 


Reviewed-by: Nicholas Kazlauskas 




---
  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 11 ---
  1 file changed, 4 insertions(+), 7 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index ddc979e3eebe..acd4874e0743 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -1356,7 +1356,7 @@ static int dm_late_init(void *handle)
 unsigned int linear_lut[16];
 int i;
 struct dmcu *dmcu = NULL;
-   bool ret = false;
+   bool ret;

 if (!adev->dm.fw_dmcu)
 return detect_mst_link_for_all_connectors(adev->ddev);
@@ -1377,13 +1377,10 @@ static int dm_late_init(void *handle)
  */
 params.min_abm_backlight = 0x28F;

-   /* todo will enable for navi10 */
-   if (adev->asic_type <= CHIP_RAVEN) {
-   ret = dmcu_load_iram(dmcu, params);
+   ret = dmcu_load_iram(dmcu, params);

-   if (!ret)
-   return -EINVAL;
-   }
+   if (!ret)
+   return -EINVAL;

 return detect_mst_link_for_all_connectors(adev->ddev);
  }
--
2.26.2



Re: [PATCH] drm/amdgpu/display: drop the reduction loop when setting the sync groups

2020-05-29 Thread Kazlauskas, Nicholas

On 2020-05-28 10:06 a.m., Alex Deucher wrote:

The logic for blanked is not the same as having a plane_state. Technically
you can drive an OTG without anything connected in the front end and it'll
just draw out the back color which is distinct from having the OTG be blanked.
If we add planes or unblank the OTG later then we'll still want the
synchronization.

Bug: https://gitlab.freedesktop.org/drm/amd/issues/781
Fixes: 5fc0cbfad45648 ("drm/amd/display: determine if a pipe is synced by plane 
state")
Cc: nicholas.kazlaus...@amd.com
Signed-off-by: Alex Deucher 
---
  drivers/gpu/drm/amd/display/dc/core/dc.c | 8 
  1 file changed, 8 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c 
b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 04c3d9f7e323..6279520f7873 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -1040,14 +1040,6 @@ static void program_timing_sync(
status->timing_sync_info.master = false;
  
  		}

-   /* remove any other pipes with plane as they have already been 
synced */
-   for (j = j + 1; j < group_size; j++) {
-   if (pipe_set[j]->plane_state) {
-   group_size--;
-   pipe_set[j] = pipe_set[group_size];
-   j--;
-   }
-   }



Looking at this again, I think I may understand the issue this was 
trying to work around.


If we try to force timing synchronization on displays that are currently 
active then this is going to force reset the vertical position, 
resulting in screen corruption.


So what this logic was attempting to do was ensure that timing 
synchronization only happens when committing two streams at a time 
without any image on the screen.


Maybe it'd be best to just blank these streams out first, but for now, 
let's actually go back to fixing this by applying the actual dpg/tg 
check that Wenjing suggests, something like:


	if (pool->opps[i]->funcs->dpg_is_blanked)
		s.blank_enabled = pool->opps[i]->funcs->dpg_is_blanked(pool->opps[i]);
	else
		s.blank_enabled = tg->funcs->is_blanked(tg);



The reason why we have this issue in the first place is because 
amdgpu_dm usually commits a dc_state with the planes already in it 
instead of committing them later, so plane_state not being NULL is 
typically true.


Regards,
Nicholas Kazlauskas

  
  		if (group_size > 1) {

dc->hwss.enable_timing_synchronization(





Re: [PATCH] drm/amdgpu/display: drop the reduction loop when setting the sync groups

2020-05-29 Thread Kazlauskas, Nicholas

On 2020-05-29 2:04 p.m., Alex Deucher wrote:

On Fri, May 29, 2020 at 9:56 AM Kazlauskas, Nicholas
 wrote:


On 2020-05-28 10:06 a.m., Alex Deucher wrote:

The logic for blanked is not the same as having a plane_state. Technically
you can drive an OTG without anything connected in the front end and it'll
just draw out the back color which is distinct from having the OTG be blanked.
If we add planes or unblank the OTG later then we'll still want the
synchronization.

Bug: https://gitlab.freedesktop.org/drm/amd/issues/781
Fixes: 5fc0cbfad45648 ("drm/amd/display: determine if a pipe is synced by plane 
state")
Cc: nicholas.kazlaus...@amd.com
Signed-off-by: Alex Deucher 
---
   drivers/gpu/drm/amd/display/dc/core/dc.c | 8 
   1 file changed, 8 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c 
b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 04c3d9f7e323..6279520f7873 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -1040,14 +1040,6 @@ static void program_timing_sync(
   status->timing_sync_info.master = false;

   }
- /* remove any other pipes with plane as they have already been 
synced */
- for (j = j + 1; j < group_size; j++) {
- if (pipe_set[j]->plane_state) {
- group_size--;
- pipe_set[j] = pipe_set[group_size];
- j--;
- }
- }



Looking at this again, I think I may understand the issue this was
trying to work around.

If we try to force timing synchronization on displays that are currently
active then this is going to force reset the vertical position,
resulting in screen corruption.

So what this logic was attempting to do was ensure that timing
synchronization only happens when committing two streams at a time
without any image on the screen.

Maybe it'd be best to just blank these streams out first, but for now,
let's actually go back to fixing this by applying the actual dpg/tg
check that Wenjing suggests, something like:

	if (pool->opps[i]->funcs->dpg_is_blanked)
		s.blank_enabled = pool->opps[i]->funcs->dpg_is_blanked(pool->opps[i]);
	else
		s.blank_enabled = tg->funcs->is_blanked(tg);



Hmm, it's not clear to me where this code needs to go.  Can you point
me in the right direction or provide a quick patch?

Thanks,

Alex


The old code used to check !tg->funcs->is_blanked(tg), i.e. to drop the
pipe from the group if it's currently active.


The issue was that on newer ASICs it's now the DPG that's the indicator
from the hardware side, so we should replace the !plane_state check with
a check first for !dpg_is_blanked and then !is_blanked if the DPG
doesn't exist.


Regards,
Nicholas Kazlauskas
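
In code, the replacement would look roughly like the following inside
program_timing_sync() - a sketch only, matching the shape of the patch
that was posted afterwards:

	bool is_blanked;

	if (pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked)
		is_blanked = pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked(
				pipe_set[j]->stream_res.opp);
	else
		is_blanked = pipe_set[j]->stream_res.tg->funcs->is_blanked(
				pipe_set[j]->stream_res.tg);

	/* was: if (pipe_set[j]->plane_state) */
	if (!is_blanked) {
		/* keep or drop the pipe exactly as the old branch did */
	}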






The reason why we have this issue in the first place is because
amdgpu_dm usually commits a dc_state with the planes already in it
instead of committing them later, so plane_state not being NULL is
typically true.

Regards,
Nicholas Kazlauskas



   if (group_size > 1) {
   dc->hwss.enable_timing_synchronization(







Re: [PATCH] drm/amdgpu/display: use blanked rather than plane state for sync groups

2020-06-02 Thread Kazlauskas, Nicholas

On 2020-06-02 5:25 p.m., Alex Deucher wrote:

We may end up with no planes set yet, depending on the ordering, but we
should have the proper blanking state, which is handled by either the
DPG or the TG depending on the hardware generation.  Check both to determine
the proper blanked state.

Bug: https://gitlab.freedesktop.org/drm/amd/issues/781
Fixes: 5fc0cbfad45648 ("drm/amd/display: determine if a pipe is synced by plane 
state")
Cc: nicholas.kazlaus...@amd.com
Signed-off-by: Alex Deucher 


This looks good to me now at a conceptual level. I guess we'll find
out later if it breaks anything.


Reviewed-by: Nicholas Kazlauskas 

Regards,
Nicholas Kazlauskas


---
  drivers/gpu/drm/amd/display/dc/core/dc.c | 24 
  1 file changed, 20 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c 
b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 04c3d9f7e323..7fdb6149047d 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -1017,9 +1017,17 @@ static void program_timing_sync(
}
}
  
-		/* set first pipe with plane as master */

+   /* set first unblanked pipe as master */
for (j = 0; j < group_size; j++) {
-   if (pipe_set[j]->plane_state) {
+   bool is_blanked;
+
+   if (pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked)
+   is_blanked =
+   
pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked(pipe_set[j]->stream_res.opp);
+   else
+   is_blanked =
+   
pipe_set[j]->stream_res.tg->funcs->is_blanked(pipe_set[j]->stream_res.tg);
+   if (!is_blanked) {
if (j == 0)
break;
  
@@ -1040,9 +1048,17 @@ static void program_timing_sync(

status->timing_sync_info.master = false;
  
  		}

-   /* remove any other pipes with plane as they have already been 
synced */
+   /* remove any other unblanked pipes as they have already been 
synced */
for (j = j + 1; j < group_size; j++) {
-   if (pipe_set[j]->plane_state) {
+   bool is_blanked;
+
+   if (pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked)
+   is_blanked =
+   
pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked(pipe_set[j]->stream_res.opp);
+   else
+   is_blanked =
+   
pipe_set[j]->stream_res.tg->funcs->is_blanked(pipe_set[j]->stream_res.tg);
+   if (!is_blanked) {
group_size--;
pipe_set[j] = pipe_set[group_size];
j--;





Re: [PATCH] drm/amdgpu/display: fix build without CONFIG_DRM_AMD_DC_DCN3_0

2020-06-03 Thread Kazlauskas, Nicholas

On 2020-06-03 2:00 p.m., Alex Deucher wrote:

Need to guard some new DCN3.0 stuff.

Signed-off-by: Alex Deucher 


Reviewed-by: Nicholas Kazlauskas 

Thanks!

Nicholas Kazlauskas


---
  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c | 5 -
  1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c 
b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
index 43d0b4e53b5d..2972392f9788 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
@@ -288,13 +288,16 @@ void optc1_program_timing(
if (optc1_is_two_pixels_per_containter(&patched_crtc_timing) || 
optc1->opp_count == 2)
h_div = H_TIMING_DIV_BY2;
  
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)

if (optc1->tg_mask->OTG_H_TIMING_DIV_MODE != 0) {
if (optc1->opp_count == 4)
h_div = H_TIMING_DIV_BY4;
  
  		REG_UPDATE(OTG_H_TIMING_CNTL,

OTG_H_TIMING_DIV_MODE, h_div);
-   } else {
+   } else
+#endif
+   {
REG_UPDATE(OTG_H_TIMING_CNTL,
OTG_H_TIMING_DIV_BY2, h_div);
}





Re: [PATCH 1/1] drm/amd/display: fix compilation error on allmodconfig

2020-06-19 Thread Kazlauskas, Nicholas

On 2020-06-19 11:27 a.m., Alex Deucher wrote:

On Thu, Jun 18, 2020 at 3:49 PM Qingqing Zhuo  wrote:


When compiled with the allmodconfig option, there are error
messages as below:

ERROR: modpost:
"mod_color_is_table_init"
[drivers/gpu/drm/amd/amdgpu/amdgpu.ko] undefined!
ERROR: modpost:
"mod_color_get_table"
[drivers/gpu/drm/amd/amdgpu/amdgpu.ko] undefined!
ERROR: modpost:
"mod_color_set_table_init_state"
[drivers/gpu/drm/amd/amdgpu/amdgpu.ko] undefined!

To fix the issue, this commit removes the
CONFIG_DRM_AMD_DC_DCN guard in the color Makefile.

Signed-off-by: Qingqing Zhuo 
CC: Lewis Huang 
CC: Aric Cyr 
CC: Alexander Deucher 
CC: Harry Wentland 
CC: Nicholas Kazlauskas 
CC: Bhawanpreet Lakha 
CC: Stephen Rothwell 


Acked-by: Alex Deucher 


Reviewed-by: Nicholas Kazlauskas 




---
  drivers/gpu/drm/amd/display/modules/color/Makefile | 6 +-
  1 file changed, 1 insertion(+), 5 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/modules/color/Makefile 
b/drivers/gpu/drm/amd/display/modules/color/Makefile
index 3ee7f27ff93b..e66c19a840c2 100644
--- a/drivers/gpu/drm/amd/display/modules/color/Makefile
+++ b/drivers/gpu/drm/amd/display/modules/color/Makefile
@@ -23,11 +23,7 @@
  # Makefile for the color sub-module of DAL.
  #

-MOD_COLOR = color_gamma.o
-
-ifdef CONFIG_DRM_AMD_DC_DCN
-MOD_COLOR += color_table.o
-endif
+MOD_COLOR = color_gamma.o color_table.o

  AMD_DAL_MOD_COLOR = $(addprefix $(AMDDALPATH)/modules/color/,$(MOD_COLOR))
  #$(info   DAL COLOR MODULE MAKEFILE )
--
2.17.1



Re: [PATCH 04/11] drm/amd/display: Handle SMU msg response

2020-06-26 Thread Kazlauskas, Nicholas

On 2020-06-26 12:48 p.m., Alex Deucher wrote:

On Fri, Jun 26, 2020 at 12:19 PM Eryk Brol  wrote:


From: Yongqiang Sun 

[Why]
The SMU may return an error code to the driver, but the driver only
checks whether the response is OK.

[How]
Check the SMU response instead of using reg_wait; assert if the response
isn't OK.


Will you ever get concurrent calls to these interfaces or do you
already have a higher level lock to prevent that?  You need to make
sure you don't have multiple threads using these interfaces at the
same time or you'll need locking to protect the message, param, and
response registers.

Alex


IIRC this is a dedicated message port for DCN. I think DC locking 
prevents this from happening on the software side and HW/firmware has a 
different port.


Regards,
Nicholas Kazlauskas
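
For illustration of the locking concern raised above, a minimal sketch of
how the message/param/response register sequence could be serialized if it
ever needed to be - the smu_msg_lock mutex is hypothetical and not part of
this patch:

/* Hypothetical lock, for illustration only */
static DEFINE_MUTEX(smu_msg_lock);

static int rv1_vbios_smu_send_msg_locked_sketch(struct clk_mgr_internal *clk_mgr,
		unsigned int msg_id, unsigned int param)
{
	uint32_t result;

	mutex_lock(&smu_msg_lock);

	/* Clear the response register */
	REG_WRITE(MP1_SMN_C2PMSG_91, VBIOSSMC_Status_BUSY);

	/* Parameter first, then the message ID to trigger the transaction */
	REG_WRITE(MP1_SMN_C2PMSG_83, param);
	REG_WRITE(MP1_SMN_C2PMSG_67, msg_id);

	result = rv1_smu_wait_for_response(clk_mgr, 10, 1000);
	ASSERT(result == VBIOSSMC_Result_OK);

	/* Read the returned value back before dropping the lock */
	result = REG_READ(MP1_SMN_C2PMSG_83);

	mutex_unlock(&smu_msg_lock);

	return result;
}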





Signed-off-by: Yongqiang Sun 
Reviewed-by: Tony Cheng 
Acked-by: Eryk Brol 
---
  .../dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c  | 39 +-
  .../dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c   | 40 ++-
  2 files changed, 75 insertions(+), 4 deletions(-)

diff --git 
a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c 
b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c
index c320b7af7d34..dbc7cde00433 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c
@@ -26,6 +26,7 @@
  #include "core_types.h"
  #include "clk_mgr_internal.h"
  #include "reg_helper.h"
+#include 

  #define MAX_INSTANCE   5
  #define MAX_SEGMENT5
@@ -68,10 +69,42 @@ static const struct IP_BASE MP1_BASE  = { { { { 0x00016000, 
0, 0, 0, 0 } },
  #define VBIOSSMC_MSG_SetDispclkFreq   0x4
  #define VBIOSSMC_MSG_SetDprefclkFreq  0x5

+#define VBIOSSMC_Status_BUSY  0x0
+#define VBIOSSMC_Result_OK0x1
+#define VBIOSSMC_Result_Failed0xFF
+#define VBIOSSMC_Result_UnknownCmd0xFE
+#define VBIOSSMC_Result_CmdRejectedPrereq 0xFD
+#define VBIOSSMC_Result_CmdRejectedBusy   0xFC
+
+/*
+ * Function to be used instead of REG_WAIT macro because the wait ends when
+ * the register is NOT EQUAL to zero, and because the translation in msg_if.h
+ * won't work with REG_WAIT.
+ */
+static uint32_t rv1_smu_wait_for_response(struct clk_mgr_internal *clk_mgr, 
unsigned int delay_us, unsigned int max_retries)
+{
+   uint32_t res_val = VBIOSSMC_Status_BUSY;
+
+   do {
+   res_val = REG_READ(MP1_SMN_C2PMSG_91);
+   if (res_val != VBIOSSMC_Status_BUSY)
+   break;
+
+   if (delay_us >= 1000)
+   msleep(delay_us/1000);
+   else if (delay_us > 0)
+   udelay(delay_us);
+   } while (max_retries--);
+
+   return res_val;
+}
+
  int rv1_vbios_smu_send_msg_with_param(struct clk_mgr_internal *clk_mgr, 
unsigned int msg_id, unsigned int param)
  {
+   uint32_t result;
+
 /* First clear response register */
-   REG_WRITE(MP1_SMN_C2PMSG_91, 0);
+   REG_WRITE(MP1_SMN_C2PMSG_91, VBIOSSMC_Status_BUSY);

 /* Set the parameter register for the SMU message, unit is Mhz */
 REG_WRITE(MP1_SMN_C2PMSG_83, param);
@@ -79,7 +112,9 @@ int rv1_vbios_smu_send_msg_with_param(struct 
clk_mgr_internal *clk_mgr, unsigned
 /* Trigger the message transaction by writing the message ID */
 REG_WRITE(MP1_SMN_C2PMSG_67, msg_id);

-   REG_WAIT(MP1_SMN_C2PMSG_91, CONTENT, 1, 10, 20);
+   result = rv1_smu_wait_for_response(clk_mgr, 10, 1000);
+
+   ASSERT(result == VBIOSSMC_Result_OK);

 /* Actual dispclk set is returned in the parameter register */
 return REG_READ(MP1_SMN_C2PMSG_83);
diff --git 
a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c 
b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c
index 6878aedf1d3e..d2facbb114d3 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c
@@ -26,6 +26,7 @@
  #include "core_types.h"
  #include "clk_mgr_internal.h"
  #include "reg_helper.h"
+#include 

  #include "renoir_ip_offset.h"

@@ -53,10 +54,43 @@
  #define VBIOSSMC_MSG_EnableTmdp48MHzRefclkPwrDown 0xD
  #define VBIOSSMC_MSG_UpdatePmeRestore0xE

+#define VBIOSSMC_Status_BUSY  0x0
+#define VBIOSSMC_Result_OK0x1
+#define VBIOSSMC_Result_Failed0xFF
+#define VBIOSSMC_Result_UnknownCmd0xFE
+#define VBIOSSMC_Result_CmdRejectedPrereq 0xFD
+#define VBIOSSMC_Result_CmdRejectedBusy   0xFC
+
+/*
+ * Function to be used instead of REG_WAIT macro because the wait ends when
+ * the register is NOT EQUAL to zero, and because the translation in msg_if.h
+ * won't work with REG_WAIT.
+ */
+static uint32_t rn_smu_wait_for_res

Re: [PATCH] Revert "drm/amd/display: Revalidate bandwidth before commiting DC updates"

2020-06-29 Thread Kazlauskas, Nicholas

On 2020-06-29 11:36 a.m., Alex Deucher wrote:

Seems to cause stability issues for some users.

This reverts commit a24eaa5c51255b344d5a321f1eeb3205f2775498.

Bug: https://gitlab.freedesktop.org/drm/amd/-/issues/1191
Signed-off-by: Alex Deucher 


I don't see the error in their log. How do we know this commit is 
actually causing the issue?


I don't really want to revert this commit because it means that we 
missed rejecting a commit during atomic check, meaning we're either 
going to get underflow on the screen or a pipe hang.


Regards,
Nicholas Kazlauskas


---
  drivers/gpu/drm/amd/display/dc/core/dc.c | 6 --
  1 file changed, 6 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c 
b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 67402d75e67e..94230bb3195d 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -2607,12 +2607,6 @@ void dc_commit_updates_for_stream(struct dc *dc,
  
  	copy_stream_update_to_stream(dc, context, stream, stream_update);
  
-	if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) {

-   DC_ERROR("Mode validation failed for stream update!\n");
-   dc_release_state(context);
-   return;
-   }
-
commit_planes_for_stream(
dc,
srf_updates,





Re: [PATCH] Revert "drm/amd/display: Revalidate bandwidth before commiting DC updates"

2020-06-29 Thread Kazlauskas, Nicholas

On 2020-06-29 11:40 a.m., Kazlauskas, Nicholas wrote:

On 2020-06-29 11:36 a.m., Alex Deucher wrote:

Seems to cause stability issues for some users.

This reverts commit a24eaa5c51255b344d5a321f1eeb3205f2775498.

Bug: https://gitlab.freedesktop.org/drm/amd/-/issues/1191

Signed-off-by: Alex Deucher 


I don't see the error in their log. How do we know this commit is 
actually causing the issue?


I don't really want to revert this commit because it means that we 
missed rejecting a commit during atomic check, meaning we're either 
going to get underflow on the screen or a pipe hang.


Regards,
Nicholas Kazlauskas


Actually, we only want to be doing this on full updates - it's not a 
very fast operation. Not sure if this is causing their stability issue 
though.


Regards,
Nicholas Kazlauskas
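
A sketch of the "full updates only" idea, assuming the update_type computed
earlier in dc_commit_updates_for_stream() is available at this point (not a
tested patch):

	/* Only revalidate bandwidth for full updates; fast updates keep the
	 * existing context and skip the (slow) validation */
	if (update_type >= UPDATE_TYPE_FULL &&
	    !dc->res_pool->funcs->validate_bandwidth(dc, context, false)) {
		DC_ERROR("Mode validation failed for stream update!\n");
		dc_release_state(context);
		return;
	}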




---
  drivers/gpu/drm/amd/display/dc/core/dc.c | 6 --
  1 file changed, 6 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c 
b/drivers/gpu/drm/amd/display/dc/core/dc.c

index 67402d75e67e..94230bb3195d 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -2607,12 +2607,6 @@ void dc_commit_updates_for_stream(struct dc *dc,
  copy_stream_update_to_stream(dc, context, stream, stream_update);
-    if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) {
-    DC_ERROR("Mode validation failed for stream update!\n");
-    dc_release_state(context);
-    return;
-    }
-
  commit_planes_for_stream(
  dc,
  srf_updates,





Re: [PATCH] drm/amd/display: add dmcub check on RENOIR

2020-07-08 Thread Kazlauskas, Nicholas

Looks good to me.

Reviewed-by: Nicholas Kazlauskas 

Regards,
Nicholas Kazlauskas

On 2020-07-08 10:15 a.m., Deucher, Alexander wrote:

[AMD Public Use]




Acked-by: Alex Deucher 

*From:* Aaron Ma 
*Sent:* Wednesday, July 8, 2020 4:16 AM
*To:* Wentland, Harry ; Li, Sun peng (Leo) 
; Deucher, Alexander ; 
Koenig, Christian ; airl...@linux.ie 
; dan...@ffwll.ch ; 
amd-gfx@lists.freedesktop.org ; 
dri-de...@lists.freedesktop.org ; 
linux-ker...@vger.kernel.org ; 
mapen...@gmail.com ; aaron...@canonical.com 


*Subject:* [PATCH] drm/amd/display: add dmcub check on RENOIR
RENOIR loads dmub fw, not dmcu; checking dmcu only will prevent loading the
iram, which breaks backlight control.

Bug: https://bugzilla.kernel.org/show_bug.cgi?id=208277

Signed-off-by: Aaron Ma 
---
  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 2 +-
  1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c

index 10ac8076d4f2..db5e0bb0d935 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -1358,7 +1358,7 @@ static int dm_late_init(void *handle)
  struct dmcu *dmcu = NULL;
  bool ret;

-   if (!adev->dm.fw_dmcu)
+   if (!adev->dm.fw_dmcu && !adev->dm.dmub_fw)
  return detect_mst_link_for_all_connectors(adev->ddev);

  dmcu = adev->dm.dc->res_pool->dmcu;
--
2.25.1




Re: [PATCH] drm/amdgpu/display: create fake mst encoders ahead of time (v2)

2020-07-10 Thread Kazlauskas, Nicholas

On 2020-07-10 9:58 a.m., Alex Deucher wrote:

Prevents a warning in the MST create connector case.

v2: create global fake encoders rather than per-connector fake encoders
to avoid running out of encoder indices.

Bug: https://gitlab.freedesktop.org/drm/amd/-/issues/1108
Fixes: c6385e503aeaf9 ("drm/amdgpu: drop legacy drm load and unload callbacks")
Signed-off-by: Alex Deucher 


I thought it was rather odd that the last patch was creating 6 per 
connector even though we were only using one.


Makes a lot more sense to be on the adev instead.

Reviewed-by: Nicholas Kazlauskas 

Regards,
Nicholas Kazlauskas


---
  .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c |  9 
  .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h | 11 +++-
  .../display/amdgpu_dm/amdgpu_dm_mst_types.c   | 53 +--
  .../display/amdgpu_dm/amdgpu_dm_mst_types.h   |  3 ++
  4 files changed, 48 insertions(+), 28 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 0e5d99a85307..74d8e61f30e4 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -978,6 +978,9 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
/* Update the actual used number of crtc */
adev->mode_info.num_crtc = adev->dm.display_indexes_num;
  
+	/* create fake encoders for MST */

+   dm_dp_create_fake_mst_encoders(adev);
+
/* TODO: Add_display_info? */
  
  	/* TODO use dynamic cursor width */

@@ -1001,6 +1004,12 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
  
  static void amdgpu_dm_fini(struct amdgpu_device *adev)

  {
+   int i;
+
+   for (i = 0; i < AMDGPU_DM_MAX_CRTC; i++) {
+   drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
+   }
+
amdgpu_dm_audio_fini(adev);
  
  	amdgpu_dm_destroy_drm_device(&adev->dm);

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
index 86c132ddc452..3f50328fe537 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -43,6 +43,9 @@
   */
  
  #define AMDGPU_DM_MAX_DISPLAY_INDEX 31

+
+#define AMDGPU_DM_MAX_CRTC 6
+
  /*
  #include "include/amdgpu_dal_power_if.h"
  #include "amdgpu_dm_irq.h"
@@ -330,6 +333,13 @@ struct amdgpu_display_manager {
 * available in FW
 */
const struct gpu_info_soc_bounding_box_v1_0 *soc_bounding_box;
+
+   /**
+* @mst_encoders:
+*
+* fake encoders used for DP MST.
+*/
+   struct amdgpu_encoder mst_encoders[AMDGPU_DM_MAX_CRTC];
  };
  
  struct amdgpu_dm_connector {

@@ -358,7 +368,6 @@ struct amdgpu_dm_connector {
struct amdgpu_dm_dp_aux dm_dp_aux;
struct drm_dp_mst_port *port;
struct amdgpu_dm_connector *mst_port;
-   struct amdgpu_encoder *mst_encoder;
struct drm_dp_aux *dsc_aux;
  
  	/* TODO see if we can merge with ddc_bus or make a dm_connector */

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index cf15248739f7..176973da18ef 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -95,7 +95,6 @@ dm_dp_mst_connector_destroy(struct drm_connector *connector)
  {
struct amdgpu_dm_connector *aconnector =
to_amdgpu_dm_connector(connector);
-   struct amdgpu_encoder *amdgpu_encoder = aconnector->mst_encoder;
  
  	if (aconnector->dc_sink) {

dc_link_remove_remote_sink(aconnector->dc_link,
@@ -105,8 +104,6 @@ dm_dp_mst_connector_destroy(struct drm_connector *connector)
  
  	kfree(aconnector->edid);
  
-	drm_encoder_cleanup(&amdgpu_encoder->base);

-   kfree(amdgpu_encoder);
drm_connector_cleanup(connector);
drm_dp_mst_put_port_malloc(aconnector->port);
kfree(aconnector);
@@ -243,7 +240,11 @@ static struct drm_encoder *
  dm_mst_atomic_best_encoder(struct drm_connector *connector,
   struct drm_connector_state *connector_state)
  {
-   return &to_amdgpu_dm_connector(connector)->mst_encoder->base;
+   struct drm_device *dev = connector->dev;
+   struct amdgpu_device *adev = dev->dev_private;
+   struct amdgpu_crtc *acrtc = to_amdgpu_crtc(connector_state->crtc);
+
+   return &adev->dm.mst_encoders[acrtc->crtc_id].base;
  }
  
  static int

@@ -306,31 +307,27 @@ static const struct drm_encoder_funcs 
amdgpu_dm_encoder_funcs = {
.destroy = amdgpu_dm_encoder_destroy,
  };
  
-static struct amdgpu_encoder *

-dm_dp_create_fake_mst_encoder(struct amdgpu_dm_connector *connector)
+void
+dm_dp_create_fake_mst_encoders(struct amdgpu_device *adev)
  {
-   struct drm_device *dev = connector->base.dev;
-   struct amdgpu_device *adev = dev->dev_private;
-   

  1   2   3   4   >