Re: [PATCH v7 1/9] drm_dp_cec: add connector info support.

2019-08-22 Thread Hans Verkuil
Alex, Ville/Rodrigo, Ben,

Can you (hopefully) Ack this patch so that I can merge it?

Thank you!

Hans

On 8/14/19 12:44 PM, Dariusz Marcinkiewicz wrote:
> Pass the connector info to the CEC adapter. This makes it possible
> to associate the CEC adapter with the corresponding drm connector.
> 
> Signed-off-by: Dariusz Marcinkiewicz 
> Signed-off-by: Hans Verkuil 
> Tested-by: Hans Verkuil 
> ---
>  .../display/amdgpu_dm/amdgpu_dm_mst_types.c   |  2 +-
>  drivers/gpu/drm/drm_dp_cec.c  | 25 ---
>  drivers/gpu/drm/i915/display/intel_dp.c   |  4 +--
>  drivers/gpu/drm/nouveau/nouveau_connector.c   |  3 +--
>  include/drm/drm_dp_helper.h   | 17 ++---
>  5 files changed, 27 insertions(+), 24 deletions(-)
> 
> diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c 
> b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
> index 16218a202b591..5ec14efd4d8cb 100644
> --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
> +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
> @@ -416,7 +416,7 @@ void amdgpu_dm_initialize_dp_connector(struct 
> amdgpu_display_manager *dm,
>  
>   drm_dp_aux_register(&aconnector->dm_dp_aux.aux);
>   drm_dp_cec_register_connector(&aconnector->dm_dp_aux.aux,
> -   aconnector->base.name, dm->adev->dev);
> +   &aconnector->base);
>   aconnector->mst_mgr.cbs = &dm_mst_cbs;
>   drm_dp_mst_topology_mgr_init(
>   &aconnector->mst_mgr,
> diff --git a/drivers/gpu/drm/drm_dp_cec.c b/drivers/gpu/drm/drm_dp_cec.c
> index b15cee85b702b..b457c16c3a8bb 100644
> --- a/drivers/gpu/drm/drm_dp_cec.c
> +++ b/drivers/gpu/drm/drm_dp_cec.c
> @@ -8,7 +8,9 @@
>  #include 
>  #include 
>  #include 
> +#include 
>  #include 
> +#include 
>  #include 
>  
>  /*
> @@ -295,7 +297,10 @@ static void drm_dp_cec_unregister_work(struct 
> work_struct *work)
>   */
>  void drm_dp_cec_set_edid(struct drm_dp_aux *aux, const struct edid *edid)
>  {
> - u32 cec_caps = CEC_CAP_DEFAULTS | CEC_CAP_NEEDS_HPD;
> + struct drm_connector *connector = aux->cec.connector;
> + u32 cec_caps = CEC_CAP_DEFAULTS | CEC_CAP_NEEDS_HPD |
> +CEC_CAP_CONNECTOR_INFO;
> + struct cec_connector_info conn_info;
>   unsigned int num_las = 1;
>   u8 cap;
>  
> @@ -344,13 +349,17 @@ void drm_dp_cec_set_edid(struct drm_dp_aux *aux, const 
> struct edid *edid)
>  
>   /* Create a new adapter */
>   aux->cec.adap = cec_allocate_adapter(&drm_dp_cec_adap_ops,
> -  aux, aux->cec.name, cec_caps,
> +  aux, connector->name, cec_caps,
>num_las);
>   if (IS_ERR(aux->cec.adap)) {
>   aux->cec.adap = NULL;
>   goto unlock;
>   }
> - if (cec_register_adapter(aux->cec.adap, aux->cec.parent)) {
> +
> + cec_fill_conn_info_from_drm(&conn_info, connector);
> + cec_s_conn_info(aux->cec.adap, &conn_info);
> +
> + if (cec_register_adapter(aux->cec.adap, connector->dev->dev)) {
>   cec_delete_adapter(aux->cec.adap);
>   aux->cec.adap = NULL;
>   } else {
> @@ -406,22 +415,20 @@ EXPORT_SYMBOL(drm_dp_cec_unset_edid);
>  /**
>   * drm_dp_cec_register_connector() - register a new connector
>   * @aux: DisplayPort AUX channel
> - * @name: name of the CEC device
> - * @parent: parent device
> + * @connector: drm connector
>   *
>   * A new connector was registered with associated CEC adapter name and
>   * CEC adapter parent device. After registering the name and parent
>   * drm_dp_cec_set_edid() is called to check if the connector supports
>   * CEC and to register a CEC adapter if that is the case.
>   */
> -void drm_dp_cec_register_connector(struct drm_dp_aux *aux, const char *name,
> -struct device *parent)
> +void drm_dp_cec_register_connector(struct drm_dp_aux *aux,
> +struct drm_connector *connector)
>  {
>   WARN_ON(aux->cec.adap);
>   if (WARN_ON(!aux->transfer))
>   return;
> - aux->cec.name = name;
> - aux->cec.parent = parent;
> + aux->cec.connector = connector;
>   INIT_DELAYED_WORK(&aux->cec.unregister_work,
> drm_dp_cec_unregister_work);
>  }
> diff --git a/drivers/gpu/drm/i915/display/intel_dp.c 
> b/drivers/gpu/drm/i915/display/intel_dp.c
> index 1092499115760..de2486fe7bf2d 100644
> --- a/drivers/gpu/drm/i915/display/intel_dp.c
> +++ b/drivers/gpu/drm/i915/display/intel_dp.c
> @@ -5497,7 +5497,6 @@ static int
>  intel_dp_connector_register(struct drm_connector *connector)
>  {
>   struct intel_dp *intel_dp = intel_attached_dp(connector);
> - struct drm_device *dev = connector->dev;
>   int ret;
>  
>   ret = intel_connector_register(connector);
> @@ -5512,8 +5511,7 @@ i
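
For illustration, a minimal usage sketch (not part of the patch) of how a DP driver would call the reworked registration API, assuming its drm_dp_aux sits next to its connector. The example_dp wrapper and function names are placeholders, not real drivers:

#include <linux/kernel.h>
#include <drm/drm_connector.h>
#include <drm/drm_dp_helper.h>

struct example_dp {
	struct drm_dp_aux aux;
	struct drm_connector connector;
};

static int example_dp_connector_register(struct drm_connector *connector)
{
	struct example_dp *dp =
		container_of(connector, struct example_dp, connector);
	int ret;

	ret = drm_dp_aux_register(&dp->aux);
	if (ret)
		return ret;

	/* New signature: hand over the whole connector instead of name + parent. */
	drm_dp_cec_register_connector(&dp->aux, connector);
	return 0;
}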

Re: [PATCH 2/3] amd/amdkfd: add Arcturus vf DID support

2019-08-22 Thread Min, Frank
Hi Alex,
Would you please help to review the kfd did add patch?

Best Regards,
Frank

-Original Message-
From: Frank.Min 
Sent: August 16, 2019 16:59
To: amd-gfx@lists.freedesktop.org
Cc: Min, Frank 
Subject: [PATCH 2/3] amd/amdkfd: add Arcturus vf DID support

Change-Id: I842cc31ab040b17dcc5765e275e5402df785b34a
Signed-off-by: Frank.Min 
---
 drivers/gpu/drm/amd/amdkfd/kfd_device.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c 
b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index 3b9fe62..32b1cfa 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -472,6 +472,7 @@ static const struct kfd_deviceid supported_devices[] = {
{ 0x738C, &arcturus_device_info },  /* Arcturus */
{ 0x7388, &arcturus_device_info },  /* Arcturus */
{ 0x738E, &arcturus_device_info },  /* Arcturus */
+   { 0x7390, &arcturus_device_info },  /* Arcturus vf */
{ 0x7310, &navi10_device_info },/* Navi10 */
{ 0x7312, &navi10_device_info },/* Navi10 */
{ 0x7318, &navi10_device_info },/* Navi10 */
-- 
2.7.4
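
For context, a rough sketch of how a DID table like supported_devices[] is typically consumed at probe time. The function name is a placeholder; the real lookup helper lives elsewhere in kfd_device.c:

static const struct kfd_device_info *example_lookup_device_info(unsigned short did)
{
	size_t i;

	for (i = 0; i < ARRAY_SIZE(supported_devices); i++) {
		/* The new 0x7390 entry makes the Arcturus VF resolve here too. */
		if (supported_devices[i].did == did)
			return supported_devices[i].device_info;
	}
	return NULL;	/* unknown DID: KFD support is simply not enabled */
}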


atombios stuck executing D850 when trying to switch to 4k@60Hz on Polaris10

2019-08-22 Thread Clemens Eisserer
Hi there,

I am trying to connect an LG 32UD59 UHD monitor to an MSI Armor RX570 4G
card via a cheap but HDMI2-certified cable.
Unfortunately the setup only runs at 30Hz, whereas when booting
Windows it automatically selects 3840x2160@59Hz.

I played a bit with adding the modelines manually; however, when
enabling those new modes the screen goes black and in syslog I find
the following entries:

[  571.174813] [drm:atom_op_jump [amdgpu]] *ERROR* atombios stuck in
loop for more than 5secs aborting
[  571.174862] [drm:amdgpu_atom_execute_table_locked [amdgpu]] *ERROR*
atombios stuck executing D850 (len 824, WS 0, PS 0) @ 0xD992
[  571.174908] [drm:amdgpu_atom_execute_table_locked [amdgpu]] *ERROR*
atombios stuck executing D70A (len 326, WS 0, PS 0) @ 0xD7A6

Xorg.0.log: https://pastebin.com/LmZ0bvyL
kernel log: https://pastebin.com/rXGVMTnV

Help would be really appreciated - I am rather latency sensitive and
those 30Hz are driving me nuts ;)

Best regards, Clemens

[PATCH 2/3] drm: drop resource_id parameter from drm_fb_helper_remove_conflicting_pci_framebuffers

2019-08-22 Thread Gerd Hoffmann
Not needed any more for remove_conflicting_pci_framebuffers calls.

Signed-off-by: Gerd Hoffmann 
---
 include/drm/drm_fb_helper.h | 4 +---
 drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 2 +-
 drivers/gpu/drm/bochs/bochs_drv.c   | 2 +-
 drivers/gpu/drm/cirrus/cirrus.c | 2 +-
 drivers/gpu/drm/mgag200/mgag200_drv.c   | 2 +-
 drivers/gpu/drm/qxl/qxl_drv.c   | 2 +-
 drivers/gpu/drm/radeon/radeon_drv.c | 2 +-
 drivers/gpu/drm/virtio/virtgpu_drv.c| 1 -
 8 files changed, 7 insertions(+), 10 deletions(-)

diff --git a/include/drm/drm_fb_helper.h b/include/drm/drm_fb_helper.h
index 5a5f4b1d8241..8dcc012ccbc8 100644
--- a/include/drm/drm_fb_helper.h
+++ b/include/drm/drm_fb_helper.h
@@ -539,18 +539,16 @@ drm_fb_helper_remove_conflicting_framebuffers(struct 
apertures_struct *a,
 /**
  * drm_fb_helper_remove_conflicting_pci_framebuffers - remove 
firmware-configured framebuffers for PCI devices
  * @pdev: PCI device
- * @resource_id: index of PCI BAR configuring framebuffer memory
  * @name: requesting driver name
  *
  * This function removes framebuffer devices (eg. initialized by firmware)
- * using memory range configured for @pdev's BAR @resource_id.
+ * using memory range configured for any of @pdev's memory bars.
  *
  * The function assumes that PCI device with shadowed ROM drives a primary
  * display and so kicks out vga16fb.
  */
 static inline int
 drm_fb_helper_remove_conflicting_pci_framebuffers(struct pci_dev *pdev,
- int resource_id,
  const char *name)
 {
int ret = 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 98df55534a6d..6b96a5738e57 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -1031,7 +1031,7 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
}
 
/* Get rid of things like offb */
-   ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, 0, 
"amdgpudrmfb");
+   ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, 
"amdgpudrmfb");
if (ret)
return ret;
 
diff --git a/drivers/gpu/drm/bochs/bochs_drv.c 
b/drivers/gpu/drm/bochs/bochs_drv.c
index 770e1625d05e..3b9b0d9bbc14 100644
--- a/drivers/gpu/drm/bochs/bochs_drv.c
+++ b/drivers/gpu/drm/bochs/bochs_drv.c
@@ -114,7 +114,7 @@ static int bochs_pci_probe(struct pci_dev *pdev,
return -ENOMEM;
}
 
-   ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, 0, 
"bochsdrmfb");
+   ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, 
"bochsdrmfb");
if (ret)
return ret;
 
diff --git a/drivers/gpu/drm/cirrus/cirrus.c b/drivers/gpu/drm/cirrus/cirrus.c
index 36a69aec8a4b..89d9e6fdeb8c 100644
--- a/drivers/gpu/drm/cirrus/cirrus.c
+++ b/drivers/gpu/drm/cirrus/cirrus.c
@@ -532,7 +532,7 @@ static int cirrus_pci_probe(struct pci_dev *pdev,
struct cirrus_device *cirrus;
int ret;
 
-   ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, 0, 
"cirrusdrmfb");
+   ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, 
"cirrusdrmfb");
if (ret)
return ret;
 
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.c 
b/drivers/gpu/drm/mgag200/mgag200_drv.c
index afd9119b6cf1..4f9df3b93598 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.c
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.c
@@ -46,7 +46,7 @@ MODULE_DEVICE_TABLE(pci, pciidlist);
 
 static int mga_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
-   drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, 0, 
"mgag200drmfb");
+   drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, "mgag200drmfb");
 
return drm_get_pci_dev(pdev, ent, &driver);
 }
diff --git a/drivers/gpu/drm/qxl/qxl_drv.c b/drivers/gpu/drm/qxl/qxl_drv.c
index c1802e01d9f6..2b726a51a302 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.c
+++ b/drivers/gpu/drm/qxl/qxl_drv.c
@@ -83,7 +83,7 @@ qxl_pci_probe(struct pci_dev *pdev, const struct 
pci_device_id *ent)
if (ret)
goto free_dev;
 
-   ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, 0, "qxl");
+   ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, "qxl");
if (ret)
goto disable_pci;
 
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c 
b/drivers/gpu/drm/radeon/radeon_drv.c
index a4a78dfdef37..624aa580d418 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -329,7 +329,7 @@ static int radeon_pci_probe(struct pci_dev *pdev,
return -EPROBE_DEFER;
 
/* Get rid of things like offb */
-   ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, 0, 
"radeondrmfb");
+   ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, 
"radeondrmfb");
if (ret)
re
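
For reference, a sketch of how the helper can build the aperture list from every memory BAR once the resource_id argument is gone. This is an illustration under assumptions, not necessarily the patch's exact implementation:

#include <linux/fb.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <drm/drm_fb_helper.h>

static int example_remove_conflicting_pci_fbs(struct pci_dev *pdev,
					      const char *name)
{
	struct apertures_struct *ap;
	bool primary;
	int bar, idx = 0;
	int ret;

	ap = alloc_apertures(PCI_ROM_RESOURCE);
	if (!ap)
		return -ENOMEM;

	/* Cover every memory BAR instead of one caller-supplied index. */
	for (bar = 0; bar < PCI_ROM_RESOURCE; bar++) {
		if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM))
			continue;
		ap->ranges[idx].base = pci_resource_start(pdev, bar);
		ap->ranges[idx].size = pci_resource_len(pdev, bar);
		idx++;
	}
	ap->count = idx;

	/* A device with a shadowed ROM is assumed to drive the primary display. */
	primary = pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
	ret = drm_fb_helper_remove_conflicting_framebuffers(ap, name, primary);
	kfree(ap);
	return ret;
}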

Re: [PATCH 2/2] drm/radeon: use WAIT_REG_MEM special op for CP HDP flush

2019-08-22 Thread Christian König

Every time we actually tried this it just ended in users reporting CP hangs.

Christian.

Am 22.08.19 um 00:20 schrieb Alex Deucher:

Flush via the ring works differently on CIK and requires a
special sequence.

Signed-off-by: Alex Deucher 
---
  drivers/gpu/drm/radeon/cik.c | 73 +++-
  1 file changed, 45 insertions(+), 28 deletions(-)

diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index 0847367..03dd075 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -3485,6 +3485,48 @@ int cik_ring_test(struct radeon_device *rdev, struct 
radeon_ring *ring)
return r;
  }
  
+static void cik_gfx_hdp_flush(struct radeon_device *rdev,

+ int ridx)
+{
+   struct radeon_ring *ring = &rdev->ring[ridx];
+   u32 ref_and_mask;
+
+   switch (ring->idx) {
+   case CAYMAN_RING_TYPE_CP1_INDEX:
+   case CAYMAN_RING_TYPE_CP2_INDEX:
+   switch (ring->me) {
+   case 0:
+   ref_and_mask = CP2 << ring->pipe;
+   break;
+   case 1:
+   ref_and_mask = CP6 << ring->pipe;
+   break;
+   default:
+   return;
+   }
+   break;
+   case RADEON_RING_TYPE_GFX_INDEX:
+   ref_and_mask = CP0;
+   break;
+   default:
+   return;
+   }
+
+   radeon_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
+   radeon_ring_write(ring, ((CP_WAIT_REG_MEM_TIMEOUT -
+ PACKET3_SET_UCONFIG_REG_START) >> 2));
+   radeon_ring_write(ring, 0xfff);
+
+   radeon_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
+   radeon_ring_write(ring, (WAIT_REG_MEM_OPERATION(1) | /* special op */
+WAIT_REG_MEM_FUNCTION(3))); /* == */
+   radeon_ring_write(ring, GPU_HDP_FLUSH_REQ >> 2);
+   radeon_ring_write(ring, GPU_HDP_FLUSH_DONE >> 2);
+   radeon_ring_write(ring, ref_and_mask);
+   radeon_ring_write(ring, ref_and_mask);
+   radeon_ring_write(ring, 0xa); /* poll interval */
+}
+
  /**
   * cik_fence_gfx_ring_emit - emit a fence on the gfx ring
   *
@@ -3511,15 +3553,7 @@ void cik_fence_gfx_ring_emit(struct radeon_device *rdev,
radeon_ring_write(ring, fence->seq);
radeon_ring_write(ring, 0);
/* HDP flush */
-   /* We should be using the new WAIT_REG_MEM special op packet here
-* but it causes the CP to hang
-*/
-   radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
-   radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
-WRITE_DATA_DST_SEL(0)));
-   radeon_ring_write(ring, HDP_MEM_COHERENCY_FLUSH_CNTL >> 2);
-   radeon_ring_write(ring, 0);
-   radeon_ring_write(ring, 0);
+   cik_gfx_hdp_flush(rdev, fence->ring);
  }
  
  /**

@@ -3549,15 +3583,7 @@ void cik_fence_compute_ring_emit(struct radeon_device 
*rdev,
radeon_ring_write(ring, fence->seq);
radeon_ring_write(ring, 0);
/* HDP flush */
-   /* We should be using the new WAIT_REG_MEM special op packet here
-* but it causes the CP to hang
-*/
-   radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
-   radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
-WRITE_DATA_DST_SEL(0)));
-   radeon_ring_write(ring, HDP_MEM_COHERENCY_FLUSH_CNTL >> 2);
-   radeon_ring_write(ring, 0);
-   radeon_ring_write(ring, 0);
+   cik_gfx_hdp_flush(rdev, fence->ring);
  }
  
  bool cik_semaphore_ring_emit(struct radeon_device *rdev,

@@ -5369,16 +5395,7 @@ void cik_vm_flush(struct radeon_device *rdev, int ridx, 
struct radeon_vm *vm)
radeon_ring_write(ring, VMID(0));
  
  	/* HDP flush */

-   /* We should be using the WAIT_REG_MEM packet here like in
-* cik_fence_ring_emit(), but it causes the CP to hang in this
-* context...
-*/
-   radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
-   radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
-WRITE_DATA_DST_SEL(0)));
-   radeon_ring_write(ring, HDP_MEM_COHERENCY_FLUSH_CNTL >> 2);
-   radeon_ring_write(ring, 0);
-   radeon_ring_write(ring, 0);
+   cik_gfx_hdp_flush(rdev, ridx);
  
  	/* bits 0-15 are the VM contexts0-15 */

radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));



RE: [PATCH 1/3] amd/amdgpu: add Arcturus vf DID support

2019-08-22 Thread Xu, Feifei

Reviewed-by: Feifei Xu 

-Original Message-
From: amd-gfx  On Behalf Of Frank.Min
Sent: August 16, 2019 16:59
To: amd-gfx@lists.freedesktop.org
Cc: Min, Frank 
Subject: [PATCH 1/3] amd/amdgpu: add Arcturus vf DID support

Change-Id: I7153153785fdd54a10ebc47e778e06982edc79d7
Signed-off-by: Frank.Min 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 0e8c165..3890ba2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -999,6 +999,7 @@ static const struct pci_device_id pciidlist[] = {
{0x1002, 0x738C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARCTURUS},
{0x1002, 0x7388, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARCTURUS},
{0x1002, 0x738E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARCTURUS},
+   {0x1002, 0x7390, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARCTURUS},
/* Navi10 */
{0x1002, 0x7310, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI10},
{0x1002, 0x7312, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI10},
-- 
2.7.4


[PATCH] drm/amd/powerplay: update cached feature enablement status V2

2019-08-22 Thread Evan Quan
Need to update the cached feature enablement status after pp_feature
settings. Another fix for the commit below:
drm/amd/powerplay: implment sysfs feature status function in smu

V2: update smu_feature_update_enable_state() and relates

Change-Id: I90e29b0d839df26825d5993212f6097c7ad4bebf
Signed-off-by: Evan Quan 
---
 drivers/gpu/drm/amd/powerplay/amdgpu_smu.c| 104 +-
 .../gpu/drm/amd/powerplay/inc/amdgpu_smu.h|   1 -
 2 files changed, 52 insertions(+), 53 deletions(-)

diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c 
b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
index 4df7fb6eaf3c..3e1cd5d9c29e 100644
--- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
@@ -94,6 +94,55 @@ size_t smu_sys_get_pp_feature_mask(struct smu_context *smu, 
char *buf)
return size;
 }
 
+static int smu_feature_update_enable_state(struct smu_context *smu,
+  uint64_t feature_mask,
+  bool enabled)
+{
+   struct smu_feature *feature = &smu->smu_feature;
+   uint32_t feature_low = 0, feature_high = 0;
+   uint64_t feature_id;
+   int ret = 0;
+
+   if (!smu->pm_enabled)
+   return ret;
+
+   feature_low = (feature_mask >> 0 ) & 0xffffffff;
+   feature_high = (feature_mask >> 32) & 0xffffffff;
+
+   if (enabled) {
+   ret = smu_send_smc_msg_with_param(smu, 
SMU_MSG_EnableSmuFeaturesLow,
+ feature_low);
+   if (ret)
+   return ret;
+   ret = smu_send_smc_msg_with_param(smu, 
SMU_MSG_EnableSmuFeaturesHigh,
+ feature_high);
+   if (ret)
+   return ret;
+   } else {
+   ret = smu_send_smc_msg_with_param(smu, 
SMU_MSG_DisableSmuFeaturesLow,
+ feature_low);
+   if (ret)
+   return ret;
+   ret = smu_send_smc_msg_with_param(smu, 
SMU_MSG_DisableSmuFeaturesHigh,
+ feature_high);
+   if (ret)
+   return ret;
+   }
+
+   mutex_lock(&feature->mutex);
+   for (feature_id = 0; feature_id < 64; feature_id++) {
+   if (feature_mask & (1ULL << feature_id)) {
+   if (enabled)
+   test_and_set_bit(feature_id, feature->enabled);
+   else
+   test_and_clear_bit(feature_id, 
feature->enabled);
+   }
+   }
+   mutex_unlock(&feature->mutex);
+
+   return ret;
+}
+
 int smu_sys_set_pp_feature_mask(struct smu_context *smu, uint64_t new_mask)
 {
int ret = 0;
@@ -591,41 +640,7 @@ int smu_feature_init_dpm(struct smu_context *smu)
 
return ret;
 }
-int smu_feature_update_enable_state(struct smu_context *smu, uint64_t 
feature_mask, bool enabled)
-{
-   uint32_t feature_low = 0, feature_high = 0;
-   int ret = 0;
 
-   if (!smu->pm_enabled)
-   return ret;
-
-   feature_low = (feature_mask >> 0 ) & 0xffffffff;
-   feature_high = (feature_mask >> 32) & 0xffffffff;
-
-   if (enabled) {
-   ret = smu_send_smc_msg_with_param(smu, 
SMU_MSG_EnableSmuFeaturesLow,
- feature_low);
-   if (ret)
-   return ret;
-   ret = smu_send_smc_msg_with_param(smu, 
SMU_MSG_EnableSmuFeaturesHigh,
- feature_high);
-   if (ret)
-   return ret;
-
-   } else {
-   ret = smu_send_smc_msg_with_param(smu, 
SMU_MSG_DisableSmuFeaturesLow,
- feature_low);
-   if (ret)
-   return ret;
-   ret = smu_send_smc_msg_with_param(smu, 
SMU_MSG_DisableSmuFeaturesHigh,
- feature_high);
-   if (ret)
-   return ret;
-
-   }
-
-   return ret;
-}
 
 int smu_feature_is_enabled(struct smu_context *smu, enum smu_feature_mask mask)
 {
@@ -651,8 +666,6 @@ int smu_feature_set_enabled(struct smu_context *smu, enum 
smu_feature_mask mask,
 {
struct smu_feature *feature = &smu->smu_feature;
int feature_id;
-   uint64_t feature_mask = 0;
-   int ret = 0;
 
feature_id = smu_feature_get_index(smu, mask);
if (feature_id < 0)
@@ -660,22 +673,9 @@ int smu_feature_set_enabled(struct smu_context *smu, enum 
smu_feature_mask mask,
 
WARN_ON(feature_id > feature->feature_num);
 
-   feature_mask = 1ULL << feature_id;
-
-   mutex_lock(&feature->mutex);
-   ret = smu_feature_update_enable_state(smu, feature_mask, enable);
-   if (ret)
- 

RE: [PATCH] drm/amd/powerplay: update cached feature enablement status

2019-08-22 Thread Quan, Evan
Please check V2.

> -Original Message-
> From: amd-gfx  On Behalf Of
> Kevin Wang
> Sent: Wednesday, August 21, 2019 7:45 PM
> To: amd-gfx@lists.freedesktop.org
> Subject: Re: [PATCH] drm/amd/powerplay: update cached feature
> enablement status
> 
> Hi Evan,
> 
> this is a known issue for me.
> I think we should add the cached feature-mask update into the
> smu_feature_update_enable_state() function.
> 
> Best Regards,
> Kevin
> 
> On 8/21/19 5:24 PM, Evan Quan wrote:
> > Need to update the cached feature enablement status after pp_feature
> > settings. Another fix for the commit below:
> > drm/amd/powerplay: implment sysfs feature status function in smu
> >
> > Change-Id: I90e29b0d839df26825d5993212f6097c7ad4bebf
> > Signed-off-by: Evan Quan 
> > ---
> >   drivers/gpu/drm/amd/powerplay/amdgpu_smu.c | 16
> 
> >   1 file changed, 16 insertions(+)
> >
> > diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
> > b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
> > index c663d25db5ab..04867cafb322 100644
> > --- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
> > +++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
> > @@ -96,11 +96,13 @@ size_t smu_sys_get_pp_feature_mask(struct
> > smu_context *smu, char *buf)
> >
> >   int smu_sys_set_pp_feature_mask(struct smu_context *smu, uint64_t
> new_mask)
> >   {
> > +   struct smu_feature *feature = &smu->smu_feature;
> > int ret = 0;
> > uint32_t feature_mask[2] = { 0 };
> > uint64_t feature_2_enabled = 0;
> > uint64_t feature_2_disabled = 0;
> > uint64_t feature_enables = 0;
> > +   uint64_t feature_id;
> >
> > ret = smu_feature_get_enabled_mask(smu, feature_mask, 2);
> > if (ret)
> > @@ -115,11 +117,25 @@ int smu_sys_set_pp_feature_mask(struct
> smu_context *smu, uint64_t new_mask)
> > ret = smu_feature_update_enable_state(smu,
> feature_2_enabled, true);
> > if (ret)
> > return ret;
> > +
> > +   mutex_lock(&feature->mutex);
> > +   for (feature_id = 0; feature_id < 64; feature_id++) {
> > +   if (feature_2_enabled & (1ULL << feature_id))
> > +   test_and_set_bit(feature_id, feature-
> >enabled);
> > +   }
> > +   mutex_unlock(&feature->mutex);
> > }
> > if (feature_2_disabled) {
> > ret = smu_feature_update_enable_state(smu,
> feature_2_disabled, false);
> > if (ret)
> > return ret;
> > +
> > +   mutex_lock(&feature->mutex);
> > +   for (feature_id = 0; feature_id < 64; feature_id++) {
> > +   if (feature_2_disabled & (1ULL << feature_id))
> > +   test_and_clear_bit(feature_id, feature-
> >enabled);
> > +   }
> > +   mutex_unlock(&feature->mutex);
> > }
> >
> > return ret;

RE: [PATCH 2/2] drm/amd/powerplay: correct the pp_feature output on Arcturus

2019-08-22 Thread Quan, Evan
Ping..

> -Original Message-
> From: Evan Quan 
> Sent: Wednesday, August 21, 2019 4:42 PM
> To: amd-gfx@lists.freedesktop.org
> Cc: Quan, Evan 
> Subject: [PATCH 2/2] drm/amd/powerplay: correct the pp_feature output on
> Arcturus
> 
> Fix for the commit below:
> drm/amd/powerplay: implment sysfs feature status function in smu
> 
> Change-Id: Id9a373f8d8866b97450be0aef0ba19d0835d40d8
> Signed-off-by: Evan Quan 
> ---
>  drivers/gpu/drm/amd/powerplay/arcturus_ppt.c  | 2 ++
> drivers/gpu/drm/amd/powerplay/inc/smu_types.h | 1 +
>  2 files changed, 3 insertions(+)
> 
> diff --git a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
> b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
> index dd6563358e8e..f1f072012fac 100644
> --- a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
> +++ b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
> @@ -141,6 +141,7 @@ static struct smu_11_0_cmn2aisc_mapping
> arcturus_feature_mask_map[SMU_FEATURE_CO
>   FEA_MAP(DPM_SOCCLK),
>   FEA_MAP(DPM_FCLK),
>   FEA_MAP(DPM_MP0CLK),
> + ARCTURUS_FEA_MAP(SMU_FEATURE_XGMI_BIT,
> FEATURE_DPM_XGMI_BIT),
>   FEA_MAP(DS_GFXCLK),
>   FEA_MAP(DS_SOCCLK),
>   FEA_MAP(DS_LCLK),
> @@ -149,6 +150,7 @@ static struct smu_11_0_cmn2aisc_mapping
> arcturus_feature_mask_map[SMU_FEATURE_CO
>   FEA_MAP(GFX_ULV),
>   ARCTURUS_FEA_MAP(SMU_FEATURE_VCN_PG_BIT,
> FEATURE_DPM_VCN_BIT),
>   FEA_MAP(RSMU_SMN_CG),
> + FEA_MAP(WAFL_CG),
>   FEA_MAP(PPT),
>   FEA_MAP(TDC),
>   FEA_MAP(APCC_PLUS),
> diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_types.h
> b/drivers/gpu/drm/amd/powerplay/inc/smu_types.h
> index 72962e842d69..c3c74098f614 100644
> --- a/drivers/gpu/drm/amd/powerplay/inc/smu_types.h
> +++ b/drivers/gpu/drm/amd/powerplay/inc/smu_types.h
> @@ -207,6 +207,7 @@ enum smu_clk_type {
> __SMU_DUMMY_MAP(TEMP_DEPENDENT_VMIN), \
> __SMU_DUMMY_MAP(MMHUB_PG),\
> __SMU_DUMMY_MAP(ATHUB_PG),\
> +   __SMU_DUMMY_MAP(WAFL_CG),
> 
>  #undef __SMU_DUMMY_MAP
>  #define __SMU_DUMMY_MAP(feature)
>   SMU_FEATURE_##feature##_BIT
> --
> 2.23.0
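
For readers unfamiliar with these tables: FEA_MAP/ARCTURUS_FEA_MAP tie a common SMU_FEATURE_* bit to the ASIC-specific FEATURE_* bit, roughly along these lines (illustrative shape only; the real macros live in the ppt headers):

#define EXAMPLE_FEA_MAP(fea) \
	[SMU_FEATURE_##fea##_BIT] = {1, FEATURE_##fea##_BIT}

/* So FEA_MAP(WAFL_CG) only works once __SMU_DUMMY_MAP(WAFL_CG) has created
 * SMU_FEATURE_WAFL_CG_BIT, which is why both hunks are needed, and the new
 * XGMI entry maps SMU_FEATURE_XGMI_BIT to the ASIC's FEATURE_DPM_XGMI_BIT. */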


RE: [PATCH 1/2] drm/amd/powerplay: correct Vega20 dpm level related settings

2019-08-22 Thread Quan, Evan
Ping..

> -Original Message-
> From: Evan Quan 
> Sent: Wednesday, August 21, 2019 4:42 PM
> To: amd-gfx@lists.freedesktop.org
> Cc: Quan, Evan 
> Subject: [PATCH 1/2] drm/amd/powerplay: correct Vega20 dpm level related
> settings
> 
> Correct the settings for auto mode and skip the unnecessary settings for
> dcefclk and fclk.
> 
> Change-Id: I7e6ca75ce86b4d5cd44920a9fbc71b6f36ea3c49
> Signed-off-by: Evan Quan 
> ---
>  .../drm/amd/powerplay/hwmgr/vega20_hwmgr.c| 60
> +--
>  1 file changed, 54 insertions(+), 6 deletions(-)
> 
> diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
> b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
> index 0516c294b377..cc52d5c8ccf9 100644
> --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
> +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
> @@ -2349,12 +2349,16 @@ static int vega20_force_dpm_highest(struct
> pp_hwmgr *hwmgr)
>   data->dpm_table.soc_table.dpm_state.soft_max_level =
>   data->dpm_table.soc_table.dpm_levels[soft_level].value;
> 
> - ret = vega20_upload_dpm_min_level(hwmgr, 0xFFFFFFFF);
> + ret = vega20_upload_dpm_min_level(hwmgr,
> FEATURE_DPM_GFXCLK_MASK |
> +  FEATURE_DPM_UCLK_MASK
> |
> +
> FEATURE_DPM_SOCCLK_MASK);
>   PP_ASSERT_WITH_CODE(!ret,
>   "Failed to upload boot level to highest!",
>   return ret);
> 
> - ret = vega20_upload_dpm_max_level(hwmgr, 0xFFFFFFFF);
> + ret = vega20_upload_dpm_max_level(hwmgr,
> FEATURE_DPM_GFXCLK_MASK |
> +  FEATURE_DPM_UCLK_MASK
> |
> +
> FEATURE_DPM_SOCCLK_MASK);
>   PP_ASSERT_WITH_CODE(!ret,
>   "Failed to upload dpm max level to highest!",
>   return ret);
> @@ -2387,12 +2391,16 @@ static int vega20_force_dpm_lowest(struct
> pp_hwmgr *hwmgr)
>   data->dpm_table.soc_table.dpm_state.soft_max_level =
>   data->dpm_table.soc_table.dpm_levels[soft_level].value;
> 
> - ret = vega20_upload_dpm_min_level(hwmgr, 0xFFFFFFFF);
> + ret = vega20_upload_dpm_min_level(hwmgr,
> FEATURE_DPM_GFXCLK_MASK |
> +  FEATURE_DPM_UCLK_MASK
> |
> +
> FEATURE_DPM_SOCCLK_MASK);
>   PP_ASSERT_WITH_CODE(!ret,
>   "Failed to upload boot level to highest!",
>   return ret);
> 
> - ret = vega20_upload_dpm_max_level(hwmgr, 0xFFFFFFFF);
> + ret = vega20_upload_dpm_max_level(hwmgr,
> FEATURE_DPM_GFXCLK_MASK |
> +  FEATURE_DPM_UCLK_MASK
> |
> +
> FEATURE_DPM_SOCCLK_MASK);
>   PP_ASSERT_WITH_CODE(!ret,
>   "Failed to upload dpm max level to highest!",
>   return ret);
> @@ -2403,14 +2411,54 @@ static int vega20_force_dpm_lowest(struct
> pp_hwmgr *hwmgr)
> 
>  static int vega20_unforce_dpm_levels(struct pp_hwmgr *hwmgr)  {
> + struct vega20_hwmgr *data =
> + (struct vega20_hwmgr *)(hwmgr->backend);
> + uint32_t soft_min_level, soft_max_level;
>   int ret = 0;
> 
> - ret = vega20_upload_dpm_min_level(hwmgr, 0xFFFFFFFF);
> + /* gfxclk soft min/max settings */
> + soft_min_level =
> + vega20_find_lowest_dpm_level(&(data-
> >dpm_table.gfx_table));
> + soft_max_level =
> + vega20_find_highest_dpm_level(&(data-
> >dpm_table.gfx_table));
> +
> + data->dpm_table.gfx_table.dpm_state.soft_min_level =
> + data-
> >dpm_table.gfx_table.dpm_levels[soft_min_level].value;
> + data->dpm_table.gfx_table.dpm_state.soft_max_level =
> + data-
> >dpm_table.gfx_table.dpm_levels[soft_max_level].value;
> +
> + /* uclk soft min/max settings */
> + soft_min_level =
> + vega20_find_lowest_dpm_level(&(data-
> >dpm_table.mem_table));
> + soft_max_level =
> + vega20_find_highest_dpm_level(&(data-
> >dpm_table.mem_table));
> +
> + data->dpm_table.mem_table.dpm_state.soft_min_level =
> + data-
> >dpm_table.mem_table.dpm_levels[soft_min_level].value;
> + data->dpm_table.mem_table.dpm_state.soft_max_level =
> + data-
> >dpm_table.mem_table.dpm_levels[soft_max_level].value;
> +
> + /* socclk soft min/max settings */
> + soft_min_level =
> + vega20_find_lowest_dpm_level(&(data-
> >dpm_table.soc_table));
> + soft_max_level =
> + vega20_find_highest_dpm_level(&(data-
> >dpm_table.soc_table));
> +
> + data->dpm_table.soc_table.dpm_state.soft_min_level =
> + data-
> >dpm_table.soc_table.dpm_levels[soft_min_level].value;
> + data->dpm_table.soc_table.dpm_state.soft_max_level =
> + data-
> >dpm_table.soc_table.dpm_levels[soft_max_level].value;
> +
> + ret = vega20_upload_dpm_min_level(hwmgr,
> FEATURE_DPM_GFXCLK_MASK |
> +  FEATURE_DP
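
As a side note, the soft min/max setup that this patch repeats for the gfx, mem and soc tables could be captured by one small helper, sketched here for illustration only (not part of the patch):

static void example_set_soft_limits(struct vega20_single_dpm_table *table)
{
	uint32_t lo = vega20_find_lowest_dpm_level(table);
	uint32_t hi = vega20_find_highest_dpm_level(table);

	table->dpm_state.soft_min_level = table->dpm_levels[lo].value;
	table->dpm_state.soft_max_level = table->dpm_levels[hi].value;
}

/* e.g. example_set_soft_limits(&data->dpm_table.gfx_table); */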

Re: [PATCH 2/2] drm/amd/powerplay: correct the pp_feature output on Arcturus

2019-08-22 Thread Wang, Kevin(Yang)
Reviewed-by: Kevin Wang 

Best Regards,
Kevin

From: amd-gfx  on behalf of Quan, Evan 

Sent: Thursday, August 22, 2019 6:20 PM
To: Quan, Evan ; amd-gfx@lists.freedesktop.org 

Cc: Deucher, Alexander 
Subject: RE: [PATCH 2/2] drm/amd/powerplay: correct the pp_feature output on 
Arcturus

Ping..

> -Original Message-
> From: Evan Quan 
> Sent: Wednesday, August 21, 2019 4:42 PM
> To: amd-gfx@lists.freedesktop.org
> Cc: Quan, Evan 
> Subject: [PATCH 2/2] drm/amd/powerplay: correct the pp_feature output on
> Arcturus
>
> Fix for the commit below:
> drm/amd/powerplay: implment sysfs feature status function in smu
>
> Change-Id: Id9a373f8d8866b97450be0aef0ba19d0835d40d8
> Signed-off-by: Evan Quan 
> ---
>  drivers/gpu/drm/amd/powerplay/arcturus_ppt.c  | 2 ++
> drivers/gpu/drm/amd/powerplay/inc/smu_types.h | 1 +
>  2 files changed, 3 insertions(+)
>
> diff --git a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
> b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
> index dd6563358e8e..f1f072012fac 100644
> --- a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
> +++ b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
> @@ -141,6 +141,7 @@ static struct smu_11_0_cmn2aisc_mapping
> arcturus_feature_mask_map[SMU_FEATURE_CO
>FEA_MAP(DPM_SOCCLK),
>FEA_MAP(DPM_FCLK),
>FEA_MAP(DPM_MP0CLK),
> + ARCTURUS_FEA_MAP(SMU_FEATURE_XGMI_BIT,
> FEATURE_DPM_XGMI_BIT),
>FEA_MAP(DS_GFXCLK),
>FEA_MAP(DS_SOCCLK),
>FEA_MAP(DS_LCLK),
> @@ -149,6 +150,7 @@ static struct smu_11_0_cmn2aisc_mapping
> arcturus_feature_mask_map[SMU_FEATURE_CO
>FEA_MAP(GFX_ULV),
>ARCTURUS_FEA_MAP(SMU_FEATURE_VCN_PG_BIT,
> FEATURE_DPM_VCN_BIT),
>FEA_MAP(RSMU_SMN_CG),
> + FEA_MAP(WAFL_CG),
>FEA_MAP(PPT),
>FEA_MAP(TDC),
>FEA_MAP(APCC_PLUS),
> diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_types.h
> b/drivers/gpu/drm/amd/powerplay/inc/smu_types.h
> index 72962e842d69..c3c74098f614 100644
> --- a/drivers/gpu/drm/amd/powerplay/inc/smu_types.h
> +++ b/drivers/gpu/drm/amd/powerplay/inc/smu_types.h
> @@ -207,6 +207,7 @@ enum smu_clk_type {
> __SMU_DUMMY_MAP(TEMP_DEPENDENT_VMIN),  \
> __SMU_DUMMY_MAP(MMHUB_PG), \
> __SMU_DUMMY_MAP(ATHUB_PG), \
> +   __SMU_DUMMY_MAP(WAFL_CG),
>
>  #undef __SMU_DUMMY_MAP
>  #define __SMU_DUMMY_MAP(feature)
>SMU_FEATURE_##feature##_BIT
> --
> 2.23.0


Re: [PATCH] drm/amd/powerplay: update cached feature enablement status V2

2019-08-22 Thread Wang, Kevin(Yang)
comment inline.

From: amd-gfx  on behalf of Evan Quan 

Sent: Thursday, August 22, 2019 6:18 PM
To: amd-gfx@lists.freedesktop.org 
Cc: Quan, Evan 
Subject: [PATCH] drm/amd/powerplay: update cached feature enablement status V2

Need to update the cached feature enablement status after pp_feature
settings. Another fix for the commit below:
drm/amd/powerplay: implment sysfs feature status function in smu

V2: update smu_feature_update_enable_state() and relates

Change-Id: I90e29b0d839df26825d5993212f6097c7ad4bebf
[kevin]: this information is not necessary for public, please remove it.
git config gerrit.createchangeid=false
Signed-off-by: Evan Quan 
---
 drivers/gpu/drm/amd/powerplay/amdgpu_smu.c| 104 +-
 .../gpu/drm/amd/powerplay/inc/amdgpu_smu.h|   1 -
 2 files changed, 52 insertions(+), 53 deletions(-)

diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c 
b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
index 4df7fb6eaf3c..3e1cd5d9c29e 100644
--- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
@@ -94,6 +94,55 @@ size_t smu_sys_get_pp_feature_mask(struct smu_context *smu, 
char *buf)
 return size;
 }

+static int smu_feature_update_enable_state(struct smu_context *smu,
+  uint64_t feature_mask,
+  bool enabled)
+{
+   struct smu_feature *feature = &smu->smu_feature;
+   uint32_t feature_low = 0, feature_high = 0;
+   uint64_t feature_id;
+   int ret = 0;
+
+   if (!smu->pm_enabled)
+   return ret;
+
+   feature_low = (feature_mask >> 0 ) & 0xffffffff;
+   feature_high = (feature_mask >> 32) & 0xffffffff;
+
+   if (enabled) {
+   ret = smu_send_smc_msg_with_param(smu, 
SMU_MSG_EnableSmuFeaturesLow,
+ feature_low);
+   if (ret)
+   return ret;
+   ret = smu_send_smc_msg_with_param(smu, 
SMU_MSG_EnableSmuFeaturesHigh,
+ feature_high);
+   if (ret)
+   return ret;
+   } else {
+   ret = smu_send_smc_msg_with_param(smu, 
SMU_MSG_DisableSmuFeaturesLow,
+ feature_low);
+   if (ret)
+   return ret;
+   ret = smu_send_smc_msg_with_param(smu, 
SMU_MSG_DisableSmuFeaturesHigh,
+ feature_high);
+   if (ret)
+   return ret;
+   }
+
+   mutex_lock(&feature->mutex);
+   for (feature_id = 0; feature_id < 64; feature_id++) {
+   if (feature_mask & (1ULL << feature_id)) {
+   if (enabled)
+   test_and_set_bit(feature_id, feature->enabled);
+   else
+   test_and_clear_bit(feature_id, 
feature->enabled);
+   }
+   }

//[kevin]: the code logic is a little redundant.
Could you use the bitmap helpers below to replace it?
header : linux/bitmap.h
 *  bitmap_and(dst, src1, src2, nbits)  *dst = *src1 & *src2
 *  bitmap_or(dst, src1, src2, nbits)   *dst = *src1 | *src2
 *  bitmap_xor(dst, src1, src2, nbits)  *dst = *src1 ^ *src2
 *  bitmap_andnot(dst, src1, src2, nbits)   *dst = *src1 & ~(*src2)

+   mutex_unlock(&feature->mutex);
+
+   return ret;
+}
+

 int smu_sys_set_pp_feature_mask(struct smu_context *smu, uint64_t new_mask)
 {
 int ret = 0;
@@ -591,41 +640,7 @@ int smu_feature_init_dpm(struct smu_context *smu)

 return ret;
 }

[kevin]:
In this patch I know you only want to fix the feature-cache issue, but the
v2 patch also reorders the functions, so the diff presents what looks like a
brand new function. I don't think that is necessary. Could you limit the
patch to the actually modified content? That would make it easier for us to
trace problems and review.
Thanks.

-int smu_feature_update_enable_state(struct smu_context *smu, uint64_t 
feature_mask, bool enabled)
-{
-   uint32_t feature_low = 0, feature_high = 0;
-   int ret = 0;

-   if (!smu->pm_enabled)
-   return ret;
-
-   feature_low = (feature_mask >> 0 ) & 0xffffffff;
-   feature_high = (feature_mask >> 32) & 0xffffffff;
-
-   if (enabled) {
-   ret = smu_send_smc_msg_with_param(smu, 
SMU_MSG_EnableSmuFeaturesLow,
- feature_low);
-   if (ret)
-   return ret;
-   ret = smu_send_smc_msg_with_param(smu, 
SMU_MSG_EnableSmuFeaturesHigh,
- feature_high);
-   if (ret)
-   return ret;
-
-   } else {
-   ret = smu_send_smc_msg_with_param(smu, 
SMU_MSG_DisableSmuFeaturesLow,
-

[PATCH] drm/amdgpu: Fix Vega20 Average Power value

2019-08-22 Thread Russell, Kent
The SMU changed reading from CurrSocketPower to AverageSocketPower, so
reflect this accordingly. This fixes the issue where Average Power
Consumption was being reported as 0 from SMU 40.46-onward

Change-Id: I471f93316820f1401cb497eefe29da68376a4bb9
Signed-off-by: Kent Russell 
---
 drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c | 2 +-
 drivers/gpu/drm/amd/powerplay/vega20_ppt.c | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c 
b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
index 0516c294b377..1820133f0ceb 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
@@ -2101,7 +2101,7 @@ static int vega20_get_gpu_power(struct pp_hwmgr *hwmgr,
if (ret)
return ret;
 
-   *query = metrics_table.CurrSocketPower << 8;
+   *query = metrics_table.AverageSocketPower << 8;
 
return ret;
 }
diff --git a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c 
b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
index e14363182691..17af12ee9e78 100644
--- a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
@@ -2927,7 +2927,7 @@ static int vega20_get_gpu_power(struct smu_context *smu, 
uint32_t *value)
if (ret)
return ret;
 
-   *value = metrics.CurrSocketPower << 8;
+   *value = metrics.AverageSocketPower << 8;
 
return 0;
 }
-- 
2.17.1


[PATCH] drm/powerplay: Fix Vega20 Average Power value

2019-08-22 Thread Russell, Kent
The SMU changed reading from CurrSocketPower to AverageSocketPower, so
reflect this accordingly. This fixes the issue where Average Power
Consumption was being reported as 0 from SMU 40.46-onward

v2: Fixed headline prefix

Change-Id: I471f93316820f1401cb497eefe29da68376a4bb9
Signed-off-by: Kent Russell 
---
 drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c | 2 +-
 drivers/gpu/drm/amd/powerplay/vega20_ppt.c | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c 
b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
index 0516c294b377..1820133f0ceb 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
@@ -2101,7 +2101,7 @@ static int vega20_get_gpu_power(struct pp_hwmgr *hwmgr,
if (ret)
return ret;
 
-   *query = metrics_table.CurrSocketPower << 8;
+   *query = metrics_table.AverageSocketPower << 8;
 
return ret;
 }
diff --git a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c 
b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
index e14363182691..17af12ee9e78 100644
--- a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
@@ -2927,7 +2927,7 @@ static int vega20_get_gpu_power(struct smu_context *smu, 
uint32_t *value)
if (ret)
return ret;
 
-   *value = metrics.CurrSocketPower << 8;
+   *value = metrics.AverageSocketPower << 8;
 
return 0;
 }
-- 
2.17.1


Re: [PATCH v7 1/9] drm_dp_cec: add connector info support.

2019-08-22 Thread Deucher, Alexander
Acked-by: Alex Deucher 

From: Hans Verkuil 
Sent: Thursday, August 22, 2019 4:08 AM
To: Dariusz Marcinkiewicz ; dri-de...@lists.freedesktop.org 
; linux-me...@vger.kernel.org 

Cc: David Airlie ; nouv...@lists.freedesktop.org 
; Dhinakaran Pandiyan 
; Koo, Anthony ; Francis, 
David ; amd-gfx@lists.freedesktop.org 
; Zuo, Jerry ; Ben Skeggs 
; Li, Sun peng (Leo) ; 
intel-...@lists.freedesktop.org ; Maxime 
Ripard ; Rodrigo Vivi ; Sean Paul 
; Thomas Lim ; 
linux-ker...@vger.kernel.org ; Manasi Navare 
; Deucher, Alexander ; 
Koenig, Christian ; Ville Syrjälä 

Subject: Re: [PATCH v7 1/9] drm_dp_cec: add connector info support.

Alex, Ville/Rodrigo, Ben,

Can you (hopefully) Ack this patch so that I can merge it?

Thank you!

Hans

On 8/14/19 12:44 PM, Dariusz Marcinkiewicz wrote:
> Pass the connector info to the CEC adapter. This makes it possible
> to associate the CEC adapter with the corresponding drm connector.
>
> Signed-off-by: Dariusz Marcinkiewicz 
> Signed-off-by: Hans Verkuil 
> Tested-by: Hans Verkuil 
> ---
>  .../display/amdgpu_dm/amdgpu_dm_mst_types.c   |  2 +-
>  drivers/gpu/drm/drm_dp_cec.c  | 25 ---
>  drivers/gpu/drm/i915/display/intel_dp.c   |  4 +--
>  drivers/gpu/drm/nouveau/nouveau_connector.c   |  3 +--
>  include/drm/drm_dp_helper.h   | 17 ++---
>  5 files changed, 27 insertions(+), 24 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c 
> b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
> index 16218a202b591..5ec14efd4d8cb 100644
> --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
> +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
> @@ -416,7 +416,7 @@ void amdgpu_dm_initialize_dp_connector(struct 
> amdgpu_display_manager *dm,
>
>drm_dp_aux_register(&aconnector->dm_dp_aux.aux);
>drm_dp_cec_register_connector(&aconnector->dm_dp_aux.aux,
> -   aconnector->base.name, dm->adev->dev);
> +   &aconnector->base);
>aconnector->mst_mgr.cbs = &dm_mst_cbs;
>drm_dp_mst_topology_mgr_init(
>&aconnector->mst_mgr,
> diff --git a/drivers/gpu/drm/drm_dp_cec.c b/drivers/gpu/drm/drm_dp_cec.c
> index b15cee85b702b..b457c16c3a8bb 100644
> --- a/drivers/gpu/drm/drm_dp_cec.c
> +++ b/drivers/gpu/drm/drm_dp_cec.c
> @@ -8,7 +8,9 @@
>  #include 
>  #include 
>  #include 
> +#include 
>  #include 
> +#include 
>  #include 
>
>  /*
> @@ -295,7 +297,10 @@ static void drm_dp_cec_unregister_work(struct 
> work_struct *work)
>   */
>  void drm_dp_cec_set_edid(struct drm_dp_aux *aux, const struct edid *edid)
>  {
> - u32 cec_caps = CEC_CAP_DEFAULTS | CEC_CAP_NEEDS_HPD;
> + struct drm_connector *connector = aux->cec.connector;
> + u32 cec_caps = CEC_CAP_DEFAULTS | CEC_CAP_NEEDS_HPD |
> +CEC_CAP_CONNECTOR_INFO;
> + struct cec_connector_info conn_info;
>unsigned int num_las = 1;
>u8 cap;
>
> @@ -344,13 +349,17 @@ void drm_dp_cec_set_edid(struct drm_dp_aux *aux, const 
> struct edid *edid)
>
>/* Create a new adapter */
>aux->cec.adap = cec_allocate_adapter(&drm_dp_cec_adap_ops,
> -  aux, aux->cec.name, cec_caps,
> +  aux, connector->name, cec_caps,
> num_las);
>if (IS_ERR(aux->cec.adap)) {
>aux->cec.adap = NULL;
>goto unlock;
>}
> - if (cec_register_adapter(aux->cec.adap, aux->cec.parent)) {
> +
> + cec_fill_conn_info_from_drm(&conn_info, connector);
> + cec_s_conn_info(aux->cec.adap, &conn_info);
> +
> + if (cec_register_adapter(aux->cec.adap, connector->dev->dev)) {
>cec_delete_adapter(aux->cec.adap);
>aux->cec.adap = NULL;
>} else {
> @@ -406,22 +415,20 @@ EXPORT_SYMBOL(drm_dp_cec_unset_edid);
>  /**
>   * drm_dp_cec_register_connector() - register a new connector
>   * @aux: DisplayPort AUX channel
> - * @name: name of the CEC device
> - * @parent: parent device
> + * @connector: drm connector
>   *
>   * A new connector was registered with associated CEC adapter name and
>   * CEC adapter parent device. After registering the name and parent
>   * drm_dp_cec_set_edid() is called to check if the connector supports
>   * CEC and to register a CEC adapter if that is the case.
>   */
> -void drm_dp_cec_register_connector(struct drm_dp_aux *aux, const char *name,
> -struct device *parent)
> +void drm_dp_cec_register_connector(struct drm_dp_aux *aux,
> +struct drm_connector *connector)
>  {
>WARN_ON(aux->cec.adap);
>if (WARN_ON(!aux->transfer))
>return;
> - aux->cec.name = name;
> - aux->cec.parent = parent;
> + aux->cec.connector = co

Re: [PATCH 2/3] amd/amdkfd: add Arcturus vf DID support

2019-08-22 Thread Deucher, Alexander
Reviewed-by: Alex Deucher 

From: Min, Frank 
Sent: Thursday, August 22, 2019 4:24 AM
To: Zeng, Oak ; amd-gfx@lists.freedesktop.org 
; Deucher, Alexander 
Subject: Re: [PATCH 2/3] amd/amdkfd: add Arcturus vf DID support

Hi Alex,
Would you please help to review the kfd did add patch?

Best Regards,
Frank

-Original Message-
From: Frank.Min 
Sent: August 16, 2019 16:59
To: amd-gfx@lists.freedesktop.org
Cc: Min, Frank 
Subject: [PATCH 2/3] amd/amdkfd: add Arcturus vf DID support

Change-Id: I842cc31ab040b17dcc5765e275e5402df785b34a
Signed-off-by: Frank.Min 
---
 drivers/gpu/drm/amd/amdkfd/kfd_device.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c 
b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index 3b9fe62..32b1cfa 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -472,6 +472,7 @@ static const struct kfd_deviceid supported_devices[] = {
 { 0x738C, &arcturus_device_info },  /* Arcturus */
 { 0x7388, &arcturus_device_info },  /* Arcturus */
 { 0x738E, &arcturus_device_info },  /* Arcturus */
+   { 0x7390, &arcturus_device_info },  /* Arcturus vf */
 { 0x7310, &navi10_device_info },/* Navi10 */
 { 0x7312, &navi10_device_info },/* Navi10 */
 { 0x7318, &navi10_device_info },/* Navi10 */
--
2.7.4


Re: atombios stuck executing D850 when trying to switch to 4k@60Hz on Polaris10

2019-08-22 Thread Deucher, Alexander
You need to use DC to support 4k@60.  Remove amdgpu.dc=0 from the kernel
command line in grub.

Alex

From: amd-gfx  on behalf of Clemens 
Eisserer 
Sent: Thursday, August 22, 2019 4:39 AM
To: amd-gfx@lists.freedesktop.org 
Subject: atombios stuck executing D850 when trying to switch to 4k@60Hz on 
Polaris10

Hi there,

I am trying to connect an LG 32UD59 UHD monitor to an MSI Armor RX570 4G
card via a cheap but HDMI2-certified cable.
Unfortunately the setup only runs at 30Hz, whereas when booting
Windows it automatically selects 3840x2160@59Hz.

I played a bit with adding the modelines manually; however, when
enabling those new modes the screen goes black and in syslog I find
the following entries:

[  571.174813] [drm:atom_op_jump [amdgpu]] *ERROR* atombios stuck in
loop for more than 5secs aborting
[  571.174862] [drm:amdgpu_atom_execute_table_locked [amdgpu]] *ERROR*
atombios stuck executing D850 (len 824, WS 0, PS 0) @ 0xD992
[  571.174908] [drm:amdgpu_atom_execute_table_locked [amdgpu]] *ERROR*
atombios stuck executing D70A (len 326, WS 0, PS 0) @ 0xD7A6

Xorg.0.log: https://pastebin.com/LmZ0bvyL
kernel log: https://pastebin.com/rXGVMTnV

Help would be really appreciated - I am rather latency sensitive and
those 30Hz are driving me nuts ;)

Best regards, Clemens

Re: [PATCH 2/2] drm/radeon: use WAIT_REG_MEM special op for CP HDP flush

2019-08-22 Thread Deucher, Alexander
This was an old patch I accidentally sent out.  Please ignore.

Alex

From: Christian König 
Sent: Thursday, August 22, 2019 5:32 AM
To: Alex Deucher ; amd-gfx@lists.freedesktop.org 

Cc: Deucher, Alexander 
Subject: Re: [PATCH 2/2] drm/radeon: use WAIT_REG_MEM special op for CP HDP 
flush

Every time we actually tried this it just ended in users reporting CP hangs.

Christian.

Am 22.08.19 um 00:20 schrieb Alex Deucher:
> Flush via the ring works differently on CIK and requires a
> special sequence.
>
> Signed-off-by: Alex Deucher 
> ---
>   drivers/gpu/drm/radeon/cik.c | 73 
> +++-
>   1 file changed, 45 insertions(+), 28 deletions(-)
>
> diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
> index 0847367..03dd075 100644
> --- a/drivers/gpu/drm/radeon/cik.c
> +++ b/drivers/gpu/drm/radeon/cik.c
> @@ -3485,6 +3485,48 @@ int cik_ring_test(struct radeon_device *rdev, struct 
> radeon_ring *ring)
>return r;
>   }
>
> +static void cik_gfx_hdp_flush(struct radeon_device *rdev,
> +   int ridx)
> +{
> + struct radeon_ring *ring = &rdev->ring[ridx];
> + u32 ref_and_mask;
> +
> + switch (ring->idx) {
> + case CAYMAN_RING_TYPE_CP1_INDEX:
> + case CAYMAN_RING_TYPE_CP2_INDEX:
> + switch (ring->me) {
> + case 0:
> + ref_and_mask = CP2 << ring->pipe;
> + break;
> + case 1:
> + ref_and_mask = CP6 << ring->pipe;
> + break;
> + default:
> + return;
> + }
> + break;
> + case RADEON_RING_TYPE_GFX_INDEX:
> + ref_and_mask = CP0;
> + break;
> + default:
> + return;
> + }
> +
> + radeon_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
> + radeon_ring_write(ring, ((CP_WAIT_REG_MEM_TIMEOUT -
> +   PACKET3_SET_UCONFIG_REG_START) >> 2));
> + radeon_ring_write(ring, 0xfff);
> +
> + radeon_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
> + radeon_ring_write(ring, (WAIT_REG_MEM_OPERATION(1) | /* special op */
> +  WAIT_REG_MEM_FUNCTION(3))); /* == */
> + radeon_ring_write(ring, GPU_HDP_FLUSH_REQ >> 2);
> + radeon_ring_write(ring, GPU_HDP_FLUSH_DONE >> 2);
> + radeon_ring_write(ring, ref_and_mask);
> + radeon_ring_write(ring, ref_and_mask);
> + radeon_ring_write(ring, 0xa); /* poll interval */
> +}
> +
>   /**
>* cik_fence_gfx_ring_emit - emit a fence on the gfx ring
>*
> @@ -3511,15 +3553,7 @@ void cik_fence_gfx_ring_emit(struct radeon_device 
> *rdev,
>radeon_ring_write(ring, fence->seq);
>radeon_ring_write(ring, 0);
>/* HDP flush */
> - /* We should be using the new WAIT_REG_MEM special op packet here
> -  * but it causes the CP to hang
> -  */
> - radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
> - radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
> -  WRITE_DATA_DST_SEL(0)));
> - radeon_ring_write(ring, HDP_MEM_COHERENCY_FLUSH_CNTL >> 2);
> - radeon_ring_write(ring, 0);
> - radeon_ring_write(ring, 0);
> + cik_gfx_hdp_flush(rdev, fence->ring);
>   }
>
>   /**
> @@ -3549,15 +3583,7 @@ void cik_fence_compute_ring_emit(struct radeon_device 
> *rdev,
>radeon_ring_write(ring, fence->seq);
>radeon_ring_write(ring, 0);
>/* HDP flush */
> - /* We should be using the new WAIT_REG_MEM special op packet here
> -  * but it causes the CP to hang
> -  */
> - radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
> - radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
> -  WRITE_DATA_DST_SEL(0)));
> - radeon_ring_write(ring, HDP_MEM_COHERENCY_FLUSH_CNTL >> 2);
> - radeon_ring_write(ring, 0);
> - radeon_ring_write(ring, 0);
> + cik_gfx_hdp_flush(rdev, fence->ring);
>   }
>
>   bool cik_semaphore_ring_emit(struct radeon_device *rdev,
> @@ -5369,16 +5395,7 @@ void cik_vm_flush(struct radeon_device *rdev, int 
> ridx, struct radeon_vm *vm)
>radeon_ring_write(ring, VMID(0));
>
>/* HDP flush */
> - /* We should be using the WAIT_REG_MEM packet here like in
> -  * cik_fence_ring_emit(), but it causes the CP to hang in this
> -  * context...
> -  */
> - radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
> - radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
> -  WRITE_DATA_DST_SEL(0)));
> - radeon_ring_write(ring, HDP_MEM_COHERENCY_FLUSH_CNTL >> 2);
> - radeon_ring_write(ring, 0);
> - radeon_ring_write(ring, 0);
> + cik_gfx_hdp_flush(rdev, ridx);
>
>/* bits 0-15 are the VM contexts0-15 */
>radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));


Re: [PATCH 1/2] drm/amd/powerplay: correct Vega20 dpm level related settings

2019-08-22 Thread Deucher, Alexander
Acked-by: Alex Deucher 

From: Quan, Evan 
Sent: Thursday, August 22, 2019 6:21 AM
To: Quan, Evan ; amd-gfx@lists.freedesktop.org 

Cc: Deucher, Alexander 
Subject: RE: [PATCH 1/2] drm/amd/powerplay: correct Vega20 dpm level related 
settings

Ping..

> -Original Message-
> From: Evan Quan 
> Sent: Wednesday, August 21, 2019 4:42 PM
> To: amd-gfx@lists.freedesktop.org
> Cc: Quan, Evan 
> Subject: [PATCH 1/2] drm/amd/powerplay: correct Vega20 dpm level related
> settings
>
> Correct the settings for auto mode and skip the unnecessary settings for
> dcefclk and fclk.
>
> Change-Id: I7e6ca75ce86b4d5cd44920a9fbc71b6f36ea3c49
> Signed-off-by: Evan Quan 
> ---
>  .../drm/amd/powerplay/hwmgr/vega20_hwmgr.c| 60
> +--
>  1 file changed, 54 insertions(+), 6 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
> b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
> index 0516c294b377..cc52d5c8ccf9 100644
> --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
> +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
> @@ -2349,12 +2349,16 @@ static int vega20_force_dpm_highest(struct
> pp_hwmgr *hwmgr)
>data->dpm_table.soc_table.dpm_state.soft_max_level =
>data->dpm_table.soc_table.dpm_levels[soft_level].value;
>
> - ret = vega20_upload_dpm_min_level(hwmgr, 0xFFFFFFFF);
> + ret = vega20_upload_dpm_min_level(hwmgr,
> FEATURE_DPM_GFXCLK_MASK |
> +  FEATURE_DPM_UCLK_MASK
> |
> +
> FEATURE_DPM_SOCCLK_MASK);
>PP_ASSERT_WITH_CODE(!ret,
>"Failed to upload boot level to highest!",
>return ret);
>
> - ret = vega20_upload_dpm_max_level(hwmgr, 0xFFFFFFFF);
> + ret = vega20_upload_dpm_max_level(hwmgr,
> FEATURE_DPM_GFXCLK_MASK |
> +  FEATURE_DPM_UCLK_MASK
> |
> +
> FEATURE_DPM_SOCCLK_MASK);
>PP_ASSERT_WITH_CODE(!ret,
>"Failed to upload dpm max level to highest!",
>return ret);
> @@ -2387,12 +2391,16 @@ static int vega20_force_dpm_lowest(struct
> pp_hwmgr *hwmgr)
>data->dpm_table.soc_table.dpm_state.soft_max_level =
>data->dpm_table.soc_table.dpm_levels[soft_level].value;
>
> - ret = vega20_upload_dpm_min_level(hwmgr, 0x);
> + ret = vega20_upload_dpm_min_level(hwmgr, FEATURE_DPM_GFXCLK_MASK |
> +  FEATURE_DPM_UCLK_MASK |
> +  FEATURE_DPM_SOCCLK_MASK);
>PP_ASSERT_WITH_CODE(!ret,
>"Failed to upload boot level to highest!",
>return ret);
>
> - ret = vega20_upload_dpm_max_level(hwmgr, 0x);
> + ret = vega20_upload_dpm_max_level(hwmgr, FEATURE_DPM_GFXCLK_MASK |
> +  FEATURE_DPM_UCLK_MASK |
> +  FEATURE_DPM_SOCCLK_MASK);
>PP_ASSERT_WITH_CODE(!ret,
>"Failed to upload dpm max level to highest!",
>return ret);
> @@ -2403,14 +2411,54 @@ static int vega20_force_dpm_lowest(struct
> pp_hwmgr *hwmgr)
>
>  static int vega20_unforce_dpm_levels(struct pp_hwmgr *hwmgr)  {
> + struct vega20_hwmgr *data =
> + (struct vega20_hwmgr *)(hwmgr->backend);
> + uint32_t soft_min_level, soft_max_level;
>int ret = 0;
>
> - ret = vega20_upload_dpm_min_level(hwmgr, 0x);
> + /* gfxclk soft min/max settings */
> + soft_min_level =
> + vega20_find_lowest_dpm_level(&(data->dpm_table.gfx_table));
> + soft_max_level =
> + vega20_find_highest_dpm_level(&(data->dpm_table.gfx_table));
> +
> + data->dpm_table.gfx_table.dpm_state.soft_min_level =
> + data->dpm_table.gfx_table.dpm_levels[soft_min_level].value;
> + data->dpm_table.gfx_table.dpm_state.soft_max_level =
> + data->dpm_table.gfx_table.dpm_levels[soft_max_level].value;
> +
> + /* uclk soft min/max settings */
> + soft_min_level =
> + vega20_find_lowest_dpm_level(&(data->dpm_table.mem_table));
> + soft_max_level =
> + vega20_find_highest_dpm_level(&(data->dpm_table.mem_table));
> +
> + data->dpm_table.mem_table.dpm_state.soft_min_level =
> + data->dpm_table.mem_table.dpm_levels[soft_min_level].value;
> + data->dpm_table.mem_table.dpm_state.soft_max_level =
> + data->dpm_table.mem_table.dpm_levels[soft_max_level].value;
> +
> + /* socclk soft min/max settings */
> + soft_min_level =
> + vega20_find_lowest_dpm_level(&(data->dpm_table.soc_table));
> + soft_max_level =
> + vega20_find_highest_dpm_level(&(data->dpm_table.soc_table));
> +
> + data->dpm_table.soc_table.dpm_state.soft_min_level =
> + data->dpm_table.soc_table.dpm_levels[soft_min_level].val

Re: [PATCH] drm/amdgpu: Fix Vega20 Average Power value

2019-08-22 Thread Deucher, Alexander
Do we need an smu version check?  Will AverageSocketPower report correctly on 
older versions of smu firmware?  Assuming that is ok, patch is:
Acked-by: Alex Deucher 

From: amd-gfx  on behalf of Russell, 
Kent 
Sent: Thursday, August 22, 2019 8:19 AM
To: amd-gfx@lists.freedesktop.org 
Cc: Russell, Kent 
Subject: [PATCH] drm/amdgpu: Fix Vega20 Average Power value

The SMU changed reading from CurrSocketPower to AverageSocketPower, so
reflect this accordingly. This fixes the issue where Average Power
Consumption was being reported as 0 from SMU 40.46-onward

Change-Id: I471f93316820f1401cb497eefe29da68376a4bb9
Signed-off-by: Kent Russell 
---
 drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c | 2 +-
 drivers/gpu/drm/amd/powerplay/vega20_ppt.c | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c 
b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
index 0516c294b377..1820133f0ceb 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
@@ -2101,7 +2101,7 @@ static int vega20_get_gpu_power(struct pp_hwmgr *hwmgr,
 if (ret)
 return ret;

-   *query = metrics_table.CurrSocketPower << 8;
+   *query = metrics_table.AverageSocketPower << 8;

 return ret;
 }
diff --git a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c 
b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
index e14363182691..17af12ee9e78 100644
--- a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
@@ -2927,7 +2927,7 @@ static int vega20_get_gpu_power(struct smu_context *smu, 
uint32_t *value)
 if (ret)
 return ret;

-   *value = metrics.CurrSocketPower << 8;
+   *value = metrics.AverageSocketPower << 8;

 return 0;
 }
--
2.17.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

RE: [PATCH] drm/amdgpu: Fix Vega20 Average Power value

2019-08-22 Thread Russell, Kent
Good point, I'll double-check that.

Kent

From: Deucher, Alexander 
Sent: Thursday, August 22, 2019 8:54 AM
To: Russell, Kent ; amd-gfx@lists.freedesktop.org
Subject: Re: [PATCH] drm/amdgpu: Fix Vega20 Average Power value

Do we need an smu version check?  Will AverageSocketPower report correctly on 
older versions of smu firmware?  Assuming that is ok, patch is:
Acked-by: Alex Deucher 

From: amd-gfx  on behalf of Russell, Kent 
Sent: Thursday, August 22, 2019 8:19 AM
To: amd-gfx@lists.freedesktop.org 
Cc: Russell, Kent 
Subject: [PATCH] drm/amdgpu: Fix Vega20 Average Power value

The SMU changed reading from CurrSocketPower to AverageSocketPower, so
reflect this accordingly. This fixes the issue where Average Power
Consumption was being reported as 0 from SMU 40.46-onward

Change-Id: I471f93316820f1401cb497eefe29da68376a4bb9
Signed-off-by: Kent Russell mailto:kent.russ...@amd.com>>
---
 drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c | 2 +-
 drivers/gpu/drm/amd/powerplay/vega20_ppt.c | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c 
b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
index 0516c294b377..1820133f0ceb 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
@@ -2101,7 +2101,7 @@ static int vega20_get_gpu_power(struct pp_hwmgr *hwmgr,
 if (ret)
 return ret;

-   *query = metrics_table.CurrSocketPower << 8;
+   *query = metrics_table.AverageSocketPower << 8;

 return ret;
 }
diff --git a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c 
b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
index e14363182691..17af12ee9e78 100644
--- a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
@@ -2927,7 +2927,7 @@ static int vega20_get_gpu_power(struct smu_context *smu, 
uint32_t *value)
 if (ret)
 return ret;

-   *value = metrics.CurrSocketPower << 8;
+   *value = metrics.AverageSocketPower << 8;

 return 0;
 }
--
2.17.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

Re: atombios stuck executing D850 when trying to switch to 4k@60Hz on Polaris10

2019-08-22 Thread Clemens Eisserer
Hi Alex,

> You need to use dc to support 4k@60.  Remove amdgpu.dc=0 from the kernel 
> command line in grub.

Thanks a lot!
Indeed, it worked immediately after removing those leftovers from my
Kaveri-based system.

Br, Clemens
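
For anyone else hitting this: the change amounts to removing the amdgpu.dc=0 override from the kernel command line and regenerating the bootloader config. A sketch of a typical GRUB setup (the exact file, variable contents and regeneration command are distribution-specific, and "quiet splash" is just a placeholder):

# /etc/default/grub
GRUB_CMDLINE_LINUX_DEFAULT="quiet splash"      # amdgpu.dc=0 removed from this line
# regenerate the config afterwards, e.g.:
#   sudo grub-mkconfig -o /boot/grub/grub.cfg  # or update-grub / grub2-mkconfig
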
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

Re: [PATCH 2/3] drm: drop resource_id parameter from drm_fb_helper_remove_conflicting_pci_framebuffers

2019-08-22 Thread Daniel Vetter
On Thu, Aug 22, 2019 at 11:06:44AM +0200, Gerd Hoffmann wrote:
> Not needed any more for remove_conflicting_pci_framebuffers calls.
> 
> Signed-off-by: Gerd Hoffmann 

Reviewed-by: Daniel Vetter 

> ---
>  include/drm/drm_fb_helper.h | 4 +---
>  drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 2 +-
>  drivers/gpu/drm/bochs/bochs_drv.c   | 2 +-
>  drivers/gpu/drm/cirrus/cirrus.c | 2 +-
>  drivers/gpu/drm/mgag200/mgag200_drv.c   | 2 +-
>  drivers/gpu/drm/qxl/qxl_drv.c   | 2 +-
>  drivers/gpu/drm/radeon/radeon_drv.c | 2 +-
>  drivers/gpu/drm/virtio/virtgpu_drv.c| 1 -
>  8 files changed, 7 insertions(+), 10 deletions(-)
> 
> diff --git a/include/drm/drm_fb_helper.h b/include/drm/drm_fb_helper.h
> index 5a5f4b1d8241..8dcc012ccbc8 100644
> --- a/include/drm/drm_fb_helper.h
> +++ b/include/drm/drm_fb_helper.h
> @@ -539,18 +539,16 @@ drm_fb_helper_remove_conflicting_framebuffers(struct 
> apertures_struct *a,
>  /**
>   * drm_fb_helper_remove_conflicting_pci_framebuffers - remove 
> firmware-configured framebuffers for PCI devices
>   * @pdev: PCI device
> - * @resource_id: index of PCI BAR configuring framebuffer memory
>   * @name: requesting driver name
>   *
>   * This function removes framebuffer devices (eg. initialized by firmware)
> - * using memory range configured for @pdev's BAR @resource_id.
> + * using memory range configured for any of @pdev's memory bars.
>   *
>   * The function assumes that PCI device with shadowed ROM drives a primary
>   * display and so kicks out vga16fb.
>   */
>  static inline int
>  drm_fb_helper_remove_conflicting_pci_framebuffers(struct pci_dev *pdev,
> -   int resource_id,
> const char *name)
>  {
>   int ret = 0;
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c 
> b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
> index 98df55534a6d..6b96a5738e57 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
> @@ -1031,7 +1031,7 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
>   }
>  
>   /* Get rid of things like offb */
> - ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, 0, 
> "amdgpudrmfb");
> + ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, 
> "amdgpudrmfb");
>   if (ret)
>   return ret;
>  
> diff --git a/drivers/gpu/drm/bochs/bochs_drv.c 
> b/drivers/gpu/drm/bochs/bochs_drv.c
> index 770e1625d05e..3b9b0d9bbc14 100644
> --- a/drivers/gpu/drm/bochs/bochs_drv.c
> +++ b/drivers/gpu/drm/bochs/bochs_drv.c
> @@ -114,7 +114,7 @@ static int bochs_pci_probe(struct pci_dev *pdev,
>   return -ENOMEM;
>   }
>  
> - ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, 0, 
> "bochsdrmfb");
> + ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, 
> "bochsdrmfb");
>   if (ret)
>   return ret;
>  
> diff --git a/drivers/gpu/drm/cirrus/cirrus.c b/drivers/gpu/drm/cirrus/cirrus.c
> index 36a69aec8a4b..89d9e6fdeb8c 100644
> --- a/drivers/gpu/drm/cirrus/cirrus.c
> +++ b/drivers/gpu/drm/cirrus/cirrus.c
> @@ -532,7 +532,7 @@ static int cirrus_pci_probe(struct pci_dev *pdev,
>   struct cirrus_device *cirrus;
>   int ret;
>  
> - ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, 0, 
> "cirrusdrmfb");
> + ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, 
> "cirrusdrmfb");
>   if (ret)
>   return ret;
>  
> diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.c 
> b/drivers/gpu/drm/mgag200/mgag200_drv.c
> index afd9119b6cf1..4f9df3b93598 100644
> --- a/drivers/gpu/drm/mgag200/mgag200_drv.c
> +++ b/drivers/gpu/drm/mgag200/mgag200_drv.c
> @@ -46,7 +46,7 @@ MODULE_DEVICE_TABLE(pci, pciidlist);
>  
>  static int mga_pci_probe(struct pci_dev *pdev, const struct pci_device_id 
> *ent)
>  {
> - drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, 0, 
> "mgag200drmfb");
> + drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, "mgag200drmfb");
>  
>   return drm_get_pci_dev(pdev, ent, &driver);
>  }
> diff --git a/drivers/gpu/drm/qxl/qxl_drv.c b/drivers/gpu/drm/qxl/qxl_drv.c
> index c1802e01d9f6..2b726a51a302 100644
> --- a/drivers/gpu/drm/qxl/qxl_drv.c
> +++ b/drivers/gpu/drm/qxl/qxl_drv.c
> @@ -83,7 +83,7 @@ qxl_pci_probe(struct pci_dev *pdev, const struct 
> pci_device_id *ent)
>   if (ret)
>   goto free_dev;
>  
> - ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, 0, "qxl");
> + ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, "qxl");
>   if (ret)
>   goto disable_pci;
>  
> diff --git a/drivers/gpu/drm/radeon/radeon_drv.c 
> b/drivers/gpu/drm/radeon/radeon_drv.c
> index a4a78dfdef37..624aa580d418 100644
> --- a/drivers/gpu/drm/radeon/radeon_drv.c
> +++ b/drivers/gpu/drm/radeon/radeon_drv.c
> @@ -329,7 +329,7 @@ static int radeon_pci_probe(struct pci_dev *pdev,
> 

[PATCH] drm/amdgpu/powerplay: remove redundant assignment to variable baco_state

2019-08-22 Thread Colin King
From: Colin Ian King 

Variable baco_state is initialized to a value that is never read and it is
re-assigned later. The initialization is redundant and can be removed.

Addresses-Coverity: ("Unused Value")
Signed-off-by: Colin Ian King 
---
 drivers/gpu/drm/amd/powerplay/smu_v11_0.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c 
b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
index 89749b1d2019..a4aba8576900 100644
--- a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
+++ b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
@@ -1656,7 +1656,7 @@ static bool smu_v11_0_baco_is_support(struct smu_context 
*smu)
 static enum smu_baco_state smu_v11_0_baco_get_state(struct smu_context *smu)
 {
struct smu_baco_context *smu_baco = &smu->smu_baco;
-   enum smu_baco_state baco_state = SMU_BACO_STATE_EXIT;
+   enum smu_baco_state baco_state;
 
mutex_lock(&smu_baco->mutex);
baco_state = smu_baco->state;
-- 
2.20.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

Re: [PATCH v3 13/16] drm/amd/display: Validate DSC caps on MST endpoints

2019-08-22 Thread Francis, David
Whoops, left in a test print.  Ignore this patch


From: David Francis 
Sent: August 21, 2019 4:01 PM
To: dri-de...@lists.freedesktop.org; amd-gfx@lists.freedesktop.org
Cc: Francis, David; Liu, Wenjing; Cornij, Nikola
Subject: [PATCH v3 13/16] drm/amd/display: Validate DSC caps on MST endpoints

During MST mode enumeration, if a new dc_sink is created,
populate it with dsc caps as appropriate.

Use drm_dp_mst_dsc_caps_for_port to get the raw caps,
then parse them onto dc_sink with dc_dsc_parse_dsc_dpcd.

Cc: Wenjing Liu 
Cc: Nikola Cornij 
Signed-off-by: David Francis 
---
 .../display/amdgpu_dm/amdgpu_dm_mst_types.c   | 27 ++-
 1 file changed, 26 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index 16218a202b59..9978c1a01eb7 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -25,6 +25,7 @@

 #include 
 #include 
+#include 
 #include "dm_services.h"
 #include "amdgpu.h"
 #include "amdgpu_dm.h"
@@ -189,6 +190,24 @@ static const struct drm_connector_funcs 
dm_dp_mst_connector_funcs = {
.early_unregister = amdgpu_dm_mst_connector_early_unregister,
 };

+#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
+static bool validate_dsc_caps_on_connector(struct amdgpu_dm_connector 
*aconnector)
+{
+   struct dc_sink *dc_sink = aconnector->dc_sink;
+   struct drm_dp_mst_port *port = aconnector->port;
+   u8 dsc_caps[16] = { 0 };
+
+   if (drm_dp_mst_dsc_caps_for_port(port, dsc_caps) < 0)
+   return false;
+
+   printk("Validated DSC caps 0x%x", dsc_caps[0]);
+   if (!dc_dsc_parse_dsc_dpcd(dsc_caps, NULL, 
&dc_sink->sink_dsc_caps.dsc_dec_caps))
+   return false;
+
+   return true;
+}
+#endif
+
 static int dm_dp_mst_get_modes(struct drm_connector *connector)
 {
struct amdgpu_dm_connector *aconnector = 
to_amdgpu_dm_connector(connector);
@@ -231,10 +250,16 @@ static int dm_dp_mst_get_modes(struct drm_connector 
*connector)
/* dc_link_add_remote_sink returns a new reference */
aconnector->dc_sink = dc_sink;

-   if (aconnector->dc_sink)
+   if (aconnector->dc_sink) {
amdgpu_dm_update_freesync_caps(
connector, aconnector->edid);

+#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
+   if (!validate_dsc_caps_on_connector(aconnector))
+   memset(&aconnector->dc_sink->sink_dsc_caps,
+  0, sizeof(aconnector->dc_sink->sink_dsc_caps));
+#endif
+   }
}

drm_connector_update_edid_property(
--
2.17.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

[PATCH] drm/powerplay: Fix Vega20 Average Power value v3

2019-08-22 Thread Russell, Kent
The SMU changed reading from CurrSocketPower to AverageSocketPower, so
reflect this accordingly. This fixes the issue where Average Power
Consumption was being reported as 0 from SMU 40.46-onward

v2: Fixed headline prefix
v3: Add check for SMU version for proper compatibility

Change-Id: I471f93316820f1401cb497eefe29da68376a4bb9
Signed-off-by: Kent Russell 
---
 drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c |  5 -
 drivers/gpu/drm/amd/powerplay/vega20_ppt.c | 10 +-
 2 files changed, 13 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c 
b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
index 0516c294b377..9f50a12f5c03 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
@@ -2101,7 +2101,10 @@ static int vega20_get_gpu_power(struct pp_hwmgr *hwmgr,
if (ret)
return ret;
 
-   *query = metrics_table.CurrSocketPower << 8;
+   if (hwmgr->smu_version < 0x282e00)
+   *query = metrics_table.CurrSocketPower << 8;
+   else
+   *query = metrics_table.AverageSocketPower << 8;
 
return ret;
 }
diff --git a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c 
b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
index e14363182691..6b4be5237ef0 100644
--- a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
@@ -2917,6 +2917,7 @@ static int vega20_get_fan_speed_percent(struct 
smu_context *smu,
 
 static int vega20_get_gpu_power(struct smu_context *smu, uint32_t *value)
 {
+   uint32_t smu_version;
int ret = 0;
SmuMetrics_t metrics;
 
@@ -2927,7 +2928,14 @@ static int vega20_get_gpu_power(struct smu_context *smu, 
uint32_t *value)
if (ret)
return ret;
 
-   *value = metrics.CurrSocketPower << 8;
+   ret = smu_get_smc_version(smu, NULL, &smu_version);
+if (ret)
+return ret;
+
+   if (smu_version < 0x282e00)
+   *value = metrics.CurrSocketPower << 8;
+   else
+   *value = metrics.AverageSocketPower << 8;
 
return 0;
 }
-- 
2.17.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

[PATCH] drm/powerplay: Fix Vega20 Average Power value v4

2019-08-22 Thread Russell, Kent
The SMU changed reading from CurrSocketPower to AverageSocketPower, so
reflect this accordingly. This fixes the issue where Average Power
Consumption was being reported as 0 from SMU 40.46-onward

v2: Fixed headline prefix
v3: Add check for SMU version for proper compatibility
v4: Style fix

Change-Id: I471f93316820f1401cb497eefe29da68376a4bb9
Signed-off-by: Kent Russell 
---
 drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c |  5 -
 drivers/gpu/drm/amd/powerplay/vega20_ppt.c | 10 +-
 2 files changed, 13 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c 
b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
index 0516c294b377..9f50a12f5c03 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
@@ -2101,7 +2101,10 @@ static int vega20_get_gpu_power(struct pp_hwmgr *hwmgr,
if (ret)
return ret;
 
-   *query = metrics_table.CurrSocketPower << 8;
+   if (hwmgr->smu_version < 0x282e00)
+   *query = metrics_table.CurrSocketPower << 8;
+   else
+   *query = metrics_table.AverageSocketPower << 8;
 
return ret;
 }
diff --git a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c 
b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
index e14363182691..0fac824490d7 100644
--- a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
@@ -2917,6 +2917,7 @@ static int vega20_get_fan_speed_percent(struct 
smu_context *smu,
 
 static int vega20_get_gpu_power(struct smu_context *smu, uint32_t *value)
 {
+   uint32_t smu_version;
int ret = 0;
SmuMetrics_t metrics;
 
@@ -2927,7 +2928,14 @@ static int vega20_get_gpu_power(struct smu_context *smu, 
uint32_t *value)
if (ret)
return ret;
 
-   *value = metrics.CurrSocketPower << 8;
+   ret = smu_get_smc_version(smu, NULL, &smu_version);
+   if (ret)
+   return ret;
+
+   if (smu_version < 0x282e00)
+   *value = metrics.CurrSocketPower << 8;
+   else
+   *value = metrics.AverageSocketPower << 8;
 
return 0;
 }
-- 
2.17.1
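
For reference on how the threshold maps to the firmware version named in the commit message: assuming the usual major/minor/patch byte packing returned by smu_get_smc_version(), 0x282e00 = (0x28 << 16) | (0x2e << 8) | 0x00, i.e. SMU 40.46.0, which lines up with the "SMU 40.46-onward" behaviour change described above.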

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

Re: [PATCH] drm/powerplay: Fix Vega20 Average Power value v4

2019-08-22 Thread Deucher, Alexander
Reviewed-by: Alex Deucher 

From: amd-gfx  on behalf of Russell, 
Kent 
Sent: Thursday, August 22, 2019 9:33 AM
To: amd-gfx@lists.freedesktop.org 
Cc: Russell, Kent 
Subject: [PATCH] drm/powerplay: Fix Vega20 Average Power value v4

The SMU changed reading from CurrSocketPower to AverageSocketPower, so
reflect this accordingly. This fixes the issue where Average Power
Consumption was being reported as 0 from SMU 40.46-onward

v2: Fixed headline prefix
v3: Add check for SMU version for proper compatibility
v4: Style fix

Change-Id: I471f93316820f1401cb497eefe29da68376a4bb9
Signed-off-by: Kent Russell 
---
 drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c |  5 -
 drivers/gpu/drm/amd/powerplay/vega20_ppt.c | 10 +-
 2 files changed, 13 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c 
b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
index 0516c294b377..9f50a12f5c03 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
@@ -2101,7 +2101,10 @@ static int vega20_get_gpu_power(struct pp_hwmgr *hwmgr,
 if (ret)
 return ret;

-   *query = metrics_table.CurrSocketPower << 8;
+   if (hwmgr->smu_version < 0x282e00)
+   *query = metrics_table.CurrSocketPower << 8;
+   else
+   *query = metrics_table.AverageSocketPower << 8;

 return ret;
 }
diff --git a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c 
b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
index e14363182691..0fac824490d7 100644
--- a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
@@ -2917,6 +2917,7 @@ static int vega20_get_fan_speed_percent(struct 
smu_context *smu,

 static int vega20_get_gpu_power(struct smu_context *smu, uint32_t *value)
 {
+   uint32_t smu_version;
 int ret = 0;
 SmuMetrics_t metrics;

@@ -2927,7 +2928,14 @@ static int vega20_get_gpu_power(struct smu_context *smu, 
uint32_t *value)
 if (ret)
 return ret;

-   *value = metrics.CurrSocketPower << 8;
+   ret = smu_get_smc_version(smu, NULL, &smu_version);
+   if (ret)
+   return ret;
+
+   if (smu_version < 0x282e00)
+   *value = metrics.CurrSocketPower << 8;
+   else
+   *value = metrics.AverageSocketPower << 8;

 return 0;
 }
--
2.17.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

Re: [PATCH v3 00/16] Display Stream Compression (DSC) for AMD Navi

2019-08-22 Thread Francis, David
I was building against amd-staging-drm-next (commit e4a67e6cf14c).

v4 will contain just the drm-mst patches and will apply on latest 
drm-tip/drm-tip (commit 018886de4726)


From: Lyude Paul 
Sent: August 21, 2019 5:20 PM
To: Francis, David; dri-de...@lists.freedesktop.org; 
amd-gfx@lists.freedesktop.org
Subject: Re: [PATCH v3 00/16] Display Stream Compression (DSC) for AMD Navi

What branch does this patch series actually apply to? I've been trying to
apply this locally, but it doesn't appear to apply against drm-tip/drm-tip,
amdgpu-next/drm-next, or origin (e.g. kernel.org) /master. Is there any chance
we could have this go against drm-tip instead (and even better, split out the
DRM-specific bits into their own patch series?)

On Wed, 2019-08-21 at 16:01 -0400, David Francis wrote:
> This patchset enables Display Stream Compression (DSC) on DP
> connectors on Navi ASICs, both SST and MST.
>
> 8k60 and 4k144 support requires ODM combine, an AMD internal
> feature that may be a bit buggy right now.
>
> Patches 1 through 5 enable DSC for SST. Most of the work was
> already done in the Navi promotion patches; this just hooks
> it up to the atomic interface. The first two reverts are of temporary
> changes to block off DSC. The third is of a commit that was
> accidentally promoted twice. The fourth and last revert fixes a
> potential issue with ODM combine.
>
> Patches 6, 7 and 8 are fixes for bugs that would be exposed by
> MST DSC. Patches 6 and 7 add and use a new DRM helper for MST
> calculations. Patch 8 fixes a silly use-uninitialized
>
> Patches 9, 10, and 11 are small DRM changes required for DSC MST:
> FEC, a new bit in the standard; MST DPCD from drivers; and
> a previously uninitialized variable.
>
> Patches 12 through 16 are the DSC MST policy itself. Patch 12
> adds DSC aux access helpers to DRM, and patches 13 and 14 make
> use of those helpers. Patch 15 deals with dividing bandwidth
> fairly between multiple streams, and patch 16 ensures
> that MST CRTC that may change DSC config are reprogrammed
>
> v2: Updating patches 6 and 14 in response to Nick's feedback
> v3: Add return value to patch 6 and split it (now patches 6 & 7)
> New patch 10 adding MST DPCD read/write support
> Minor fix (num_ports--) to patch 11
> Add DRM helpers (patch 12)
>
> David Francis (16):
>   Revert "drm/amd/display: skip dsc config for navi10 bring up"
>   Revert "drm/amd/display: navi10 bring up skip dsc encoder config"
>   Revert "drm/amd/display: add global master update lock for DCN2"
>   Revert "drm/amd/display: Fix underscan not using proper scaling"
>   drm/amd/display: Enable SST DSC in DM
>   drm/dp-mst: Add PBN calculation for DSC modes
>   drm/amd/display: Use correct helpers to compute timeslots
>   drm/amd/display: Initialize DSC PPS variables to 0
>   drm/dp-mst: Parse FEC capability on MST ports
>   drm/dp-mst: Add MST support to DP DPCD R/W functions
>   drm/dp-mst: Fill branch->num_ports
>   drm/dp-mst: Add helpers for querying and enabling MST DSC
>   drm/amd/display: Validate DSC caps on MST endpoints
>   drm/amd/display: Write DSC enable to MST DPCD
>   drm/amd/display: MST DSC compute fair share
>   drm/amd/display: Trigger modesets on MST DSC connectors
>
>  .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 113 -
>  .../amd/display/amdgpu_dm/amdgpu_dm_helpers.c |  33 +-
>  .../display/amdgpu_dm/amdgpu_dm_mst_types.c   | 402 +-
>  .../display/amdgpu_dm/amdgpu_dm_mst_types.h   |   4 +
>  drivers/gpu/drm/amd/display/dc/core/dc.c  |  12 +-
>  .../drm/amd/display/dc/core/dc_link_hwss.c|   3 +
>  .../gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c  |   3 +
>  .../drm/amd/display/dc/dcn20/dcn20_hwseq.c|   4 -
>  .../gpu/drm/amd/display/dc/dcn20/dcn20_optc.c |  72 +---
>  .../gpu/drm/amd/display/dc/dcn20/dcn20_optc.h |   3 -
>  .../drm/amd/display/dc/dcn20/dcn20_resource.c |   7 +-
>  .../drm/amd/display/dc/dcn20/dcn20_resource.h |   1 +
>  .../display/dc/dcn20/dcn20_stream_encoder.c   |   8 -
>  .../amd/display/dc/inc/hw/timing_generator.h  |   2 -
>  drivers/gpu/drm/drm_dp_aux_dev.c  |  12 +-
>  drivers/gpu/drm/drm_dp_helper.c   |  10 +-
>  drivers/gpu/drm/drm_dp_mst_topology.c | 240 +++
>  include/drm/drm_dp_mst_helper.h   |   8 +-
>  18 files changed, 806 insertions(+), 131 deletions(-)
>
--
Cheers,
Lyude Paul

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

Re: [PATCH] drm/powerplay: Fix Vega20 Average Power value v4

2019-08-22 Thread Wang, Kevin(Yang)
Reviewed-by: Kevin Wang 

Best Regards,
Kevin

From: amd-gfx  on behalf of Deucher, 
Alexander 
Sent: Thursday, August 22, 2019 9:36 PM
To: Russell, Kent ; amd-gfx@lists.freedesktop.org 

Subject: Re: [PATCH] drm/powerplay: Fix Vega20 Average Power value v4

Reviewed-by: Alex Deucher 

From: amd-gfx  on behalf of Russell, 
Kent 
Sent: Thursday, August 22, 2019 9:33 AM
To: amd-gfx@lists.freedesktop.org 
Cc: Russell, Kent 
Subject: [PATCH] drm/powerplay: Fix Vega20 Average Power value v4

The SMU changed reading from CurrSocketPower to AverageSocketPower, so
reflect this accordingly. This fixes the issue where Average Power
Consumption was being reported as 0 from SMU 40.46-onward

v2: Fixed headline prefix
v3: Add check for SMU version for proper compatibility
v4: Style fix

Change-Id: I471f93316820f1401cb497eefe29da68376a4bb9
Signed-off-by: Kent Russell 
---
 drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c |  5 -
 drivers/gpu/drm/amd/powerplay/vega20_ppt.c | 10 +-
 2 files changed, 13 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c 
b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
index 0516c294b377..9f50a12f5c03 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
@@ -2101,7 +2101,10 @@ static int vega20_get_gpu_power(struct pp_hwmgr *hwmgr,
 if (ret)
 return ret;

-   *query = metrics_table.CurrSocketPower << 8;
+   if (hwmgr->smu_version < 0x282e00)
+   *query = metrics_table.CurrSocketPower << 8;
+   else
+   *query = metrics_table.AverageSocketPower << 8;

 return ret;
 }
diff --git a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c 
b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
index e14363182691..0fac824490d7 100644
--- a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
@@ -2917,6 +2917,7 @@ static int vega20_get_fan_speed_percent(struct 
smu_context *smu,

 static int vega20_get_gpu_power(struct smu_context *smu, uint32_t *value)
 {
+   uint32_t smu_version;
 int ret = 0;
 SmuMetrics_t metrics;

@@ -2927,7 +2928,14 @@ static int vega20_get_gpu_power(struct smu_context *smu, 
uint32_t *value)
 if (ret)
 return ret;

-   *value = metrics.CurrSocketPower << 8;
+   ret = smu_get_smc_version(smu, NULL, &smu_version);
+   if (ret)
+   return ret;
+
+   if (smu_version < 0x282e00)
+   *value = metrics.CurrSocketPower << 8;
+   else
+   *value = metrics.AverageSocketPower << 8;

 return 0;
 }
--
2.17.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

[PATCH] Revert "drm/amdgpu: make function pointers mandatory"

2019-08-22 Thread Gang Ba
This reverts commit f54b30d70bc606f7a154edba5883c7fa23838e9f.

Change-Id: I22327aac390297bdf6a19b3ac33fadb47be1e96d
---
 drivers/gpu/drm/amd/amdgpu/cik_ih.c  |  3 ++-
 drivers/gpu/drm/amd/amdgpu/cik_sdma.c| 20 
 drivers/gpu/drm/amd/amdgpu/cz_ih.c   |  3 ++-
 drivers/gpu/drm/amd/amdgpu/dce_v10_0.c   |  3 ++-
 drivers/gpu/drm/amd/amdgpu/dce_v11_0.c   |  3 ++-
 drivers/gpu/drm/amd/amdgpu/dce_v6_0.c|  3 ++-
 drivers/gpu/drm/amd/amdgpu/dce_v8_0.c|  3 ++-
 drivers/gpu/drm/amd/amdgpu/dce_virtual.c |  3 ++-
 drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c|  3 ++-
 drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c|  3 ++-
 drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c|  3 ++-
 drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c|  3 ++-
 drivers/gpu/drm/amd/amdgpu/iceland_ih.c  |  3 ++-
 drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c   | 20 
 drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c   | 20 
 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c   | 25 ++---
 drivers/gpu/drm/amd/amdgpu/si_dma.c  | 20 
 drivers/gpu/drm/amd/amdgpu/si_ih.c   |  3 ++-
 drivers/gpu/drm/amd/amdgpu/tonga_ih.c|  3 ++-
 drivers/gpu/drm/amd/amdgpu/vega10_ih.c   |  3 ++-
 20 files changed, 84 insertions(+), 66 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/cik_ih.c 
b/drivers/gpu/drm/amd/amdgpu/cik_ih.c
index 721c757..3b5b2ab 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_ih.c
@@ -441,7 +441,8 @@ static const struct amdgpu_ih_funcs cik_ih_funcs = {
 
 static void cik_ih_set_interrupt_funcs(struct amdgpu_device *adev)
 {
-   adev->irq.ih_funcs = &cik_ih_funcs;
+   if (adev->irq.ih_funcs == NULL)
+   adev->irq.ih_funcs = &cik_ih_funcs;
 }
 
 const struct amdgpu_ip_block_version cik_ih_ip_block =
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c 
b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
index d42808b..f30a961 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
@@ -1356,8 +1356,10 @@ static const struct amdgpu_buffer_funcs 
cik_sdma_buffer_funcs = {
 
 static void cik_sdma_set_buffer_funcs(struct amdgpu_device *adev)
 {
-   adev->mman.buffer_funcs = &cik_sdma_buffer_funcs;
-   adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
+   if (adev->mman.buffer_funcs == NULL) {
+   adev->mman.buffer_funcs = &cik_sdma_buffer_funcs;
+   adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
+   }
 }
 
 static const struct amdgpu_vm_pte_funcs cik_sdma_vm_pte_funcs = {
@@ -1373,13 +1375,15 @@ static void cik_sdma_set_vm_pte_funcs(struct 
amdgpu_device *adev)
struct drm_gpu_scheduler *sched;
unsigned i;
 
-   adev->vm_manager.vm_pte_funcs = &cik_sdma_vm_pte_funcs;
-   for (i = 0; i < adev->sdma.num_instances; i++) {
-   sched = &adev->sdma.instance[i].ring.sched;
-   adev->vm_manager.vm_pte_rqs[i] =
-   &sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL];
+   if (adev->vm_manager.vm_pte_funcs == NULL) {
+   adev->vm_manager.vm_pte_funcs = &cik_sdma_vm_pte_funcs;
+   for (i = 0; i < adev->sdma.num_instances; i++) {
+   sched = &adev->sdma.instance[i].ring.sched;
+   adev->vm_manager.vm_pte_rqs[i] =
+   &sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL];
+   }
+   adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances;
}
-   adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances;
 }
 
 const struct amdgpu_ip_block_version cik_sdma_ip_block =
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_ih.c 
b/drivers/gpu/drm/amd/amdgpu/cz_ih.c
index 61024b9..76af75e 100644
--- a/drivers/gpu/drm/amd/amdgpu/cz_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/cz_ih.c
@@ -422,7 +422,8 @@ static const struct amdgpu_ih_funcs cz_ih_funcs = {
 
 static void cz_ih_set_interrupt_funcs(struct amdgpu_device *adev)
 {
-   adev->irq.ih_funcs = &cz_ih_funcs;
+   if (adev->irq.ih_funcs == NULL)
+   adev->irq.ih_funcs = &cz_ih_funcs;
 }
 
 const struct amdgpu_ip_block_version cz_ih_ip_block =
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c 
b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index c609b7a..c337d71 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -3574,7 +3574,8 @@ static const struct amdgpu_display_funcs 
dce_v10_0_display_funcs = {
 
 static void dce_v10_0_set_display_funcs(struct amdgpu_device *adev)
 {
-   adev->mode_info.funcs = &dce_v10_0_display_funcs;
+   if (adev->mode_info.funcs == NULL)
+   adev->mode_info.funcs = &dce_v10_0_display_funcs;
 }
 
 static const struct amdgpu_irq_src_funcs dce_v10_0_crtc_irq_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c 
b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index 719db05..e0c6b36 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce

Re: [PATCH] Revert "drm/amdgpu: make function pointers mandatory"

2019-08-22 Thread Christian König

On 22.08.19 at 16:49, Gang Ba wrote:

This reverts commit f54b30d70bc606f7a154edba5883c7fa23838e9f.


??? Why do we want to revert that one?

Christian.



Change-Id: I22327aac390297bdf6a19b3ac33fadb47be1e96d
---
  drivers/gpu/drm/amd/amdgpu/cik_ih.c  |  3 ++-
  drivers/gpu/drm/amd/amdgpu/cik_sdma.c| 20 
  drivers/gpu/drm/amd/amdgpu/cz_ih.c   |  3 ++-
  drivers/gpu/drm/amd/amdgpu/dce_v10_0.c   |  3 ++-
  drivers/gpu/drm/amd/amdgpu/dce_v11_0.c   |  3 ++-
  drivers/gpu/drm/amd/amdgpu/dce_v6_0.c|  3 ++-
  drivers/gpu/drm/amd/amdgpu/dce_v8_0.c|  3 ++-
  drivers/gpu/drm/amd/amdgpu/dce_virtual.c |  3 ++-
  drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c|  3 ++-
  drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c|  3 ++-
  drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c|  3 ++-
  drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c|  3 ++-
  drivers/gpu/drm/amd/amdgpu/iceland_ih.c  |  3 ++-
  drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c   | 20 
  drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c   | 20 
  drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c   | 25 ++---
  drivers/gpu/drm/amd/amdgpu/si_dma.c  | 20 
  drivers/gpu/drm/amd/amdgpu/si_ih.c   |  3 ++-
  drivers/gpu/drm/amd/amdgpu/tonga_ih.c|  3 ++-
  drivers/gpu/drm/amd/amdgpu/vega10_ih.c   |  3 ++-
  20 files changed, 84 insertions(+), 66 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/cik_ih.c 
b/drivers/gpu/drm/amd/amdgpu/cik_ih.c
index 721c757..3b5b2ab 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_ih.c
@@ -441,7 +441,8 @@ static const struct amdgpu_ih_funcs cik_ih_funcs = {
  
  static void cik_ih_set_interrupt_funcs(struct amdgpu_device *adev)

  {
-   adev->irq.ih_funcs = &cik_ih_funcs;
+   if (adev->irq.ih_funcs == NULL)
+   adev->irq.ih_funcs = &cik_ih_funcs;
  }
  
  const struct amdgpu_ip_block_version cik_ih_ip_block =

diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c 
b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
index d42808b..f30a961 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
@@ -1356,8 +1356,10 @@ static const struct amdgpu_buffer_funcs 
cik_sdma_buffer_funcs = {
  
  static void cik_sdma_set_buffer_funcs(struct amdgpu_device *adev)

  {
-   adev->mman.buffer_funcs = &cik_sdma_buffer_funcs;
-   adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
+   if (adev->mman.buffer_funcs == NULL) {
+   adev->mman.buffer_funcs = &cik_sdma_buffer_funcs;
+   adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
+   }
  }
  
  static const struct amdgpu_vm_pte_funcs cik_sdma_vm_pte_funcs = {

@@ -1373,13 +1375,15 @@ static void cik_sdma_set_vm_pte_funcs(struct 
amdgpu_device *adev)
struct drm_gpu_scheduler *sched;
unsigned i;
  
-	adev->vm_manager.vm_pte_funcs = &cik_sdma_vm_pte_funcs;

-   for (i = 0; i < adev->sdma.num_instances; i++) {
-   sched = &adev->sdma.instance[i].ring.sched;
-   adev->vm_manager.vm_pte_rqs[i] =
-   &sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL];
+   if (adev->vm_manager.vm_pte_funcs == NULL) {
+   adev->vm_manager.vm_pte_funcs = &cik_sdma_vm_pte_funcs;
+   for (i = 0; i < adev->sdma.num_instances; i++) {
+   sched = &adev->sdma.instance[i].ring.sched;
+   adev->vm_manager.vm_pte_rqs[i] =
+   &sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL];
+   }
+   adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances;
}
-   adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances;
  }
  
  const struct amdgpu_ip_block_version cik_sdma_ip_block =

diff --git a/drivers/gpu/drm/amd/amdgpu/cz_ih.c 
b/drivers/gpu/drm/amd/amdgpu/cz_ih.c
index 61024b9..76af75e 100644
--- a/drivers/gpu/drm/amd/amdgpu/cz_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/cz_ih.c
@@ -422,7 +422,8 @@ static const struct amdgpu_ih_funcs cz_ih_funcs = {
  
  static void cz_ih_set_interrupt_funcs(struct amdgpu_device *adev)

  {
-   adev->irq.ih_funcs = &cz_ih_funcs;
+   if (adev->irq.ih_funcs == NULL)
+   adev->irq.ih_funcs = &cz_ih_funcs;
  }
  
  const struct amdgpu_ip_block_version cz_ih_ip_block =

diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c 
b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index c609b7a..c337d71 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -3574,7 +3574,8 @@ static const struct amdgpu_display_funcs 
dce_v10_0_display_funcs = {
  
  static void dce_v10_0_set_display_funcs(struct amdgpu_device *adev)

  {
-   adev->mode_info.funcs = &dce_v10_0_display_funcs;
+   if (adev->mode_info.funcs == NULL)
+   adev->mode_info.funcs = &dce_v10_0_display_funcs;
  }
  
  static const struct amdgpu_irq_src_funcs dce_v10_0_crtc_irq_funcs = {

diff --git a/dr

Re: [PATCH v4 1/4] drm/amdgpu: Add RAS EEPROM table.

2019-08-22 Thread Grodzovsky, Andrey

On 8/22/19 12:13 AM, Zhou1, Tao wrote:





-Original Message-
From: Andrey Grodzovsky 

Sent: August 22, 2019 4:02
To: amd-gfx@lists.freedesktop.org
Cc: Deucher, Alexander 
; Pan, Xinhui
; Zhang, Hawking 
;
Tuikov, Luben ; Lazar, Lijo 
;
Quan, Evan ; Panariti, David
; Russell, Kent 
; Zhou1,
Tao ; Grodzovsky, Andrey

Subject: [PATCH v4 1/4] drm/amdgpu: Add RAS EEPROM table.

Add RAS EEPROM table manager to enable RAS errors to be stored upon
appearance and retrieved on driver load.

v2: Fix some prints.

v3:
Fix checksum calculation.
Make table record and header structs packed to do correct byte value sum.
Fix record crossing EEPROM page boundary.

v4:
Fix byte sum val calculation for record - look at sizeof(record).
Fix some style comments.

Signed-off-by: Andrey Grodzovsky 
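
A minimal sketch of the kind of byte-value checksum the changelog describes (an illustration under assumptions, not the actual amdgpu_ras_eeprom code): with the header and record structs packed, the checksum can be a plain 8-bit wraparound sum over every byte, with the stored value chosen so the complete table sums to zero.

/* uint8_t/size_t come from <linux/types.h> in kernel context */
static uint8_t eeprom_table_byte_sum(const uint8_t *buf, size_t len)
{
	uint8_t csum = 0;
	size_t i;

	/* 8-bit wraparound sum over the packed header + record bytes */
	for (i = 0; i < len; i++)
		csum += buf[i];
	return csum;
}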

---
 drivers/gpu/drm/amd/amdgpu/Makefile|   2 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h|   3 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c | 482
+
drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h |  90 +
 4 files changed, 576 insertions(+), 1 deletion(-)  create mode 100644
drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
 create mode 100644
drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h

diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile
b/drivers/gpu/drm/amd/amdgpu/Makefile
index 28d76bd..f016cf1 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -54,7 +54,7 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \
amdgpu_gtt_mgr.o amdgpu_vram_mgr.o amdgpu_virt.o
amdgpu_atomfirmware.o \
amdgpu_vf_error.o amdgpu_sched.o amdgpu_debugfs.o
amdgpu_ids.o \
amdgpu_gmc.o amdgpu_xgmi.o amdgpu_csa.o amdgpu_ras.o
amdgpu_vm_cpu.o \
-   amdgpu_vm_sdma.o amdgpu_discovery.o
+   amdgpu_vm_sdma.o amdgpu_pmu.o amdgpu_discovery.o
amdgpu_ras_eeprom.o

 amdgpu-$(CONFIG_PERF_EVENTS) += amdgpu_pmu.o

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
index 2765f2d..8d5bcd8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
@@ -29,6 +29,7 @@
 #include "amdgpu.h"
 #include "amdgpu_psp.h"
 #include "ta_ras_if.h"
+#include "amdgpu_ras_eeprom.h"

 enum amdgpu_ras_block {
AMDGPU_RAS_BLOCK__UMC = 0,
@@ -333,6 +334,8 @@ struct amdgpu_ras {
struct mutex recovery_lock;

uint32_t flags;
+
+   struct amdgpu_ras_eeprom_control eeprom_control;
 };

 struct ras_fs_data {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
new file mode 100644
index 000..bf07515
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
@@ -0,0 +1,482 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person
+obtaining a
+ * copy of this software and associated documentation files (the
+"Software"),
+ * to deal in the Software without restriction, including without
+limitation
+ * the rights to use, copy, modify, merge, publish, distribute,
+sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom
+the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO
EVENT
+SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM,
+DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR
THE USE
+OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "amdgpu_ras_eeprom.h"
+#include "amdgpu.h"
+#include "amdgpu_ras.h"
+#include 
+
+#define EEPROM_I2C_TARGET_ADDR 0xA0
+
+#define EEPROM_TABLE_HEADER_SIZE 20
+#define EEPROM_TABLE_RECORD_SIZE 24


[Tao] should we replace fixed value with sizeof for the two macros?


No, as I already explained before: EEPROM_TABLE_HEADER/RECORD_SIZE represent the 
actual size in bytes that those entities occupy in EEPROM memory, as defined in 
the EEPROM table description, while struct eeprom_table_record would be larger 
because, for example, to store the 6 bytes of RetiredPage I have to use a 
uint64, which is 8 bytes. I could of course use some kind of bitvector (bitset) 
to store them compactly, but it seems to me 
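
To make the distinction concrete, a hypothetical sketch (field names and sizes are assumptions for illustration, not the real amdgpu_ras_eeprom definitions; memcpy comes from <linux/string.h> in kernel context). The in-memory struct uses natural C types and padding, while the on-EEPROM record is a fixed packed layout, so sizeof() of the struct is not what gets written:

struct example_record {			/* in-memory form */
	uint64_t retired_page;		/* only 6 of these 8 bytes exist in EEPROM */
	uint64_t timestamp;
	uint8_t  err_type;
};

#define EXAMPLE_EEPROM_RECORD_SIZE 15	/* 6 + 8 + 1 bytes as laid out in EEPROM */

/* Serialize into the packed on-EEPROM layout (assumes a little-endian host). */
static void example_pack(const struct example_record *r,
			 uint8_t buf[EXAMPLE_EEPROM_RECORD_SIZE])
{
	memcpy(&buf[0], &r->retired_page, 6);	/* low 6 bytes only */
	memcpy(&buf[6], &r->timestamp, 8);
	buf[14] = r->err_type;
}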

Re: [PATCH v4 4/4] drm/amdgpu: Vega20 SMU I2C HW engine controller.

2019-08-22 Thread Grodzovsky, Andrey

On 8/21/19 10:32 PM, Alex Deucher wrote:
> On Wed, Aug 21, 2019 at 4:02 PM Andrey Grodzovsky
>  wrote:
>> Implement HW I2C engine controller to be used by the RAS EEPROM
>> table manager. This is based on code from ATITOOLs.
>>
>> v2:
>> Rename the file and all function prefixes to smu_v11_0_i2c
>>
>> Per Luben's observation, always fill the TX fifo to full so
>> we don't have garbage interpreted by the slave as valid data.
>>
>> v3:
>> Remove preemption disable as the HW I2C controller will not
>> stop the clock on empty TX fifo and so it's not critical to
>> keep not empty queue.
>> Switch to fast mode 400 khz SCL clock for faster read and write.
>>
>> Signed-off-by: Andrey Grodzovsky 
>> ---
>>   drivers/gpu/drm/amd/amdgpu/Makefile|   5 +-
>>   drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c |   5 +-
>>   drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.c | 710 
>> +
>>   drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.h |  34 ++
>>   4 files changed, 751 insertions(+), 3 deletions(-)
>>   create mode 100644 drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.c
>>   create mode 100644 drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.h
>>
>> diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile 
>> b/drivers/gpu/drm/amd/amdgpu/Makefile
>> index f016cf1..14733ff 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/Makefile
>> +++ b/drivers/gpu/drm/amd/amdgpu/Makefile
>> @@ -38,6 +38,9 @@ ccflags-y := -I$(FULL_AMD_PATH)/include/asic_reg \
>>  -I$(FULL_AMD_DISPLAY_PATH)/amdgpu_dm \
>>  -I$(FULL_AMD_PATH)/amdkfd
>>
>> +
>> +
>> +
> Drop this random whitespace change.
>
>>   amdgpu-y := amdgpu_drv.o
>>
>>   # add KMS driver
>> @@ -54,7 +57,7 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \
>>  amdgpu_gtt_mgr.o amdgpu_vram_mgr.o amdgpu_virt.o 
>> amdgpu_atomfirmware.o \
>>  amdgpu_vf_error.o amdgpu_sched.o amdgpu_debugfs.o amdgpu_ids.o \
>>  amdgpu_gmc.o amdgpu_xgmi.o amdgpu_csa.o amdgpu_ras.o 
>> amdgpu_vm_cpu.o \
>> -   amdgpu_vm_sdma.o amdgpu_pmu.o amdgpu_discovery.o amdgpu_ras_eeprom.o
>> +   amdgpu_vm_sdma.o amdgpu_pmu.o amdgpu_discovery.o amdgpu_ras_eeprom.o 
>> smu_v11_0_i2c.o
>>
>>   amdgpu-$(CONFIG_PERF_EVENTS) += amdgpu_pmu.o
>>
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c 
>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
>> index bf07515..e6b2e17 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
>> @@ -25,6 +25,7 @@
>>   #include "amdgpu.h"
>>   #include "amdgpu_ras.h"
>>   #include 
>> +#include "smu_v11_0_i2c.h"
>>
>>   #define EEPROM_I2C_TARGET_ADDR 0xA0
>>
>> @@ -111,7 +112,7 @@ int amdgpu_ras_eeprom_init(struct 
>> amdgpu_ras_eeprom_control *control)
>>
>>  switch (adev->asic_type) {
>>  case CHIP_VEGA20:
>> -   /*TODO Add MI-60 */
>> +   ret = 
>> smu_v11_0_i2c_eeprom_control_init(&control->eeprom_accessor);
>>  break;
>>
>>  default:
>> @@ -163,7 +164,7 @@ void amdgpu_ras_eeprom_fini(struct 
>> amdgpu_ras_eeprom_control *control)
>>
>>  switch (adev->asic_type) {
>>  case CHIP_VEGA20:
>> -   /*TODO Add MI-60 */
>> +   smu_v11_0_i2c_eeprom_control_fini(&control->eeprom_accessor);
>>  break;
>>
>>  default:
>> diff --git a/drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.c 
>> b/drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.c
>> new file mode 100644
>> index 000..24405fa
>> --- /dev/null
>> +++ b/drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.c
>> @@ -0,0 +1,710 @@
>> +/*
>> + * Copyright 2019 Advanced Micro Devices, Inc.
>> + *
>> + * Permission is hereby granted, free of charge, to any person obtaining a
>> + * copy of this software and associated documentation files (the 
>> "Software"),
>> + * to deal in the Software without restriction, including without limitation
>> + * the rights to use, copy, modify, merge, publish, distribute, sublicense,
>> + * and/or sell copies of the Software, and to permit persons to whom the
>> + * Software is furnished to do so, subject to the following conditions:
>> + *
>> + * The above copyright notice and this permission notice shall be included 
>> in
>> + * all copies or substantial portions of the Software.
>> + *
>> + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS 
>> OR
>> + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
>> + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
>> + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
>> + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
>> + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
>> + * OTHER DEALINGS IN THE SOFTWARE.
>> + *
>> + */
>> +
>> +#include "smuio/smuio_11_0_0_offset.h"
>> +#include "smuio/smuio_11_0_0_sh_mask.h"
>> +
>> +#include "smu_v11_0_i2c.h"
>> +#include "amdgpu.h"
>> +#include "soc15_common.h"
>> +#inc

[PATCH 1/3] drm/amdgpu/powerplay/smu7: enable mclk switching if monitors are synced

2019-08-22 Thread Alex Deucher
If DC has synced the displays, we can enable mclk switching to
save power.

Signed-off-by: Alex Deucher 
---
 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c | 7 ---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c 
b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
index 3c1084de5d59..34f95e0e3ea4 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
@@ -2956,9 +2956,10 @@ static int smu7_apply_state_adjust_rules(struct pp_hwmgr 
*hwmgr,
if (hwmgr->display_config->num_display == 0)
disable_mclk_switching = false;
else
-   disable_mclk_switching = ((1 < hwmgr->display_config->num_display) ||
- disable_mclk_switching_for_frame_lock ||
- smu7_vblank_too_short(hwmgr, hwmgr->display_config->min_vblank_time));
+   disable_mclk_switching = ((1 < hwmgr->display_config->num_display) &&
+ !hwmgr->display_config->multi_monitor_in_sync) ||
+   disable_mclk_switching_for_frame_lock ||
+   smu7_vblank_too_short(hwmgr, hwmgr->display_config->min_vblank_time);
 
sclk = smu7_ps->performance_levels[0].engine_clock;
mclk = smu7_ps->performance_levels[0].memory_clock;
-- 
2.20.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

[PATCH 2/3] drm/amdgpu/powerplay/vega10: enable mclk switching if monitors are synced

2019-08-22 Thread Alex Deucher
If DC has synced the displays, we can enable mclk switching to
save power.

Signed-off-by: Alex Deucher 
---
 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c 
b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
index 948c54cb9c5d..d08493b67b67 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
@@ -3220,7 +3220,8 @@ static int vega10_apply_state_adjust_rules(struct 
pp_hwmgr *hwmgr,
if (hwmgr->display_config->num_display == 0)
disable_mclk_switching = false;
else
-   disable_mclk_switching = (hwmgr->display_config->num_display > 1) ||
+   disable_mclk_switching = ((1 < hwmgr->display_config->num_display) &&
+ !hwmgr->display_config->multi_monitor_in_sync) ||
disable_mclk_switching_for_frame_lock ||
disable_mclk_switching_for_vr ||
force_mclk_high;
-- 
2.20.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

[PATCH 3/3] drm/amd/display: update bw_calcs to take pipe sync into account (v2)

2019-08-22 Thread Alex Deucher
Properly set all_displays_in_sync so that, when the data is propagated
to powerplay, mclk switching can be enabled when all monitors are in
sync.

v2: fix logic, clean up

Signed-off-by: Alex Deucher 
---
 .../gpu/drm/amd/display/dc/calcs/dce_calcs.c  | 49 ++-
 1 file changed, 47 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c 
b/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
index 9f12e21f8b9b..8d904647fb0f 100644
--- a/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
@@ -25,6 +25,7 @@
 
 #include 
 
+#include "resource.h"
 #include "dm_services.h"
 #include "dce_calcs.h"
 #include "dc.h"
@@ -2977,6 +2978,50 @@ static void populate_initial_data(
data->number_of_displays = num_displays;
 }
 
+static bool all_displays_in_sync(const struct pipe_ctx pipe[],
+int pipe_count,
+uint32_t number_of_displays)
+{
+   const struct pipe_ctx *unsynced_pipes[MAX_PIPES] = { NULL };
+   int group_size = 1;
+   int i, j;
+
+   for (i = 0; i < pipe_count; i++) {
+   if (!pipe[i].stream)
+   continue;
+
+   unsynced_pipes[i] = &pipe[i];
+   }
+
+   for (i = 0; i < pipe_count; i++) {
+   const struct pipe_ctx *pipe_set[MAX_PIPES];
+
+   if (!unsynced_pipes[i])
+   continue;
+
+   pipe_set[0] = unsynced_pipes[i];
+   unsynced_pipes[i] = NULL;
+
+   /* Add tg to the set, search rest of the tg's for ones with
+* same timing, add all tgs with same timing to the group
+*/
+   for (j = i + 1; j < pipe_count; j++) {
+   if (!unsynced_pipes[j])
+   continue;
+
+   if (resource_are_streams_timing_synchronizable(
+   unsynced_pipes[j]->stream,
+   pipe_set[0]->stream)) {
+   pipe_set[group_size] = unsynced_pipes[j];
+   unsynced_pipes[j] = NULL;
+   group_size++;
+   }
+   }
+   }
+
+   return (group_size == number_of_displays) ? true : false;
+}
+
 /**
  * Return:
  * true -  Display(s) configuration supported.
@@ -2998,8 +3043,8 @@ bool bw_calcs(struct dc_context *ctx,
 
populate_initial_data(pipe, pipe_count, data);
 
-   /*TODO: this should be taken out calcs output and assigned during timing sync for pplib use*/
-   calcs_output->all_displays_in_sync = false;
+   calcs_output->all_displays_in_sync = all_displays_in_sync(pipe, pipe_count,
+ data->number_of_displays);
 
if (data->number_of_displays != 0) {
uint8_t yclk_lvl, sclk_lvl;
-- 
2.20.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

[PATCH 0/3] Support mclk switching when monitors are in sync

2019-08-22 Thread Alex Deucher
This patch set enables mclk switching with multiple monitors when all
monitors are in sync.  Normally mclk switching is not available with
multiple monitors because the vblank timing does not line up.  However,
if the timing is identical, the display driver can sync up the displays
in some cases.  Check for these cases and allow mclk switching when
possible.

Alex Deucher (3):
  drm/amdgpu/powerplay/smu7: enable mclk switching if monitors are
synced
  drm/amdgpu/powerplay/vega10: enable mclk switching if monitors are
synced
  drm/amd/display: update bw_calcs to take pipe sync into account (v2)

 .../gpu/drm/amd/display/dc/calcs/dce_calcs.c  | 49 ++-
 .../gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c  |  7 +--
 .../drm/amd/powerplay/hwmgr/vega10_hwmgr.c|  3 +-
 3 files changed, 53 insertions(+), 6 deletions(-)

-- 
2.20.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

Re: [PATCH v4 4/4] drm/amdgpu: Vega20 SMU I2C HW engine controller.

2019-08-22 Thread Alex Deucher
On Thu, Aug 22, 2019 at 11:35 AM Grodzovsky, Andrey
 wrote:
>
>
> On 8/21/19 10:32 PM, Alex Deucher wrote:
> > On Wed, Aug 21, 2019 at 4:02 PM Andrey Grodzovsky
> >  wrote:
> >> Implement HW I2C engine controller to be used by the RAS EEPROM
> >> table manager. This is based on code from ATITOOLs.
> >>
> >> v2:
> >> Rename the file and all function prefixes to smu_v11_0_i2c
> >>
> >> Per Luben's observation, always fill the TX fifo to full so
> >> we don't have garbage interpreted by the slave as valid data.
> >>
> >> v3:
> >> Remove preemption disable as the HW I2C controller will not
> >> stop the clock on an empty TX fifo, so it's not critical to
> >> keep the queue non-empty.
> >> Switch to fast mode (400 kHz) SCL clock for faster reads and writes.
> >>
> >> Signed-off-by: Andrey Grodzovsky 
> >> ---
> >>   drivers/gpu/drm/amd/amdgpu/Makefile|   5 +-
> >>   drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c |   5 +-
> >>   drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.c | 710 
> >> +
> >>   drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.h |  34 ++
> >>   4 files changed, 751 insertions(+), 3 deletions(-)
> >>   create mode 100644 drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.c
> >>   create mode 100644 drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.h
> >>
> >> diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile 
> >> b/drivers/gpu/drm/amd/amdgpu/Makefile
> >> index f016cf1..14733ff 100644
> >> --- a/drivers/gpu/drm/amd/amdgpu/Makefile
> >> +++ b/drivers/gpu/drm/amd/amdgpu/Makefile
> >> @@ -38,6 +38,9 @@ ccflags-y := -I$(FULL_AMD_PATH)/include/asic_reg \
> >>  -I$(FULL_AMD_DISPLAY_PATH)/amdgpu_dm \
> >>  -I$(FULL_AMD_PATH)/amdkfd
> >>
> >> +
> >> +
> >> +
> > Drop this random whitespace change.
> >
> >>   amdgpu-y := amdgpu_drv.o
> >>
> >>   # add KMS driver
> >> @@ -54,7 +57,7 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \
> >>  amdgpu_gtt_mgr.o amdgpu_vram_mgr.o amdgpu_virt.o 
> >> amdgpu_atomfirmware.o \
> >>  amdgpu_vf_error.o amdgpu_sched.o amdgpu_debugfs.o amdgpu_ids.o \
> >>  amdgpu_gmc.o amdgpu_xgmi.o amdgpu_csa.o amdgpu_ras.o 
> >> amdgpu_vm_cpu.o \
> >> -   amdgpu_vm_sdma.o amdgpu_pmu.o amdgpu_discovery.o 
> >> amdgpu_ras_eeprom.o
> >> +   amdgpu_vm_sdma.o amdgpu_pmu.o amdgpu_discovery.o 
> >> amdgpu_ras_eeprom.o smu_v11_0_i2c.o
> >>
> >>   amdgpu-$(CONFIG_PERF_EVENTS) += amdgpu_pmu.o
> >>
> >> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c 
> >> b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
> >> index bf07515..e6b2e17 100644
> >> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
> >> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
> >> @@ -25,6 +25,7 @@
> >>   #include "amdgpu.h"
> >>   #include "amdgpu_ras.h"
> >>   #include 
> >> +#include "smu_v11_0_i2c.h"
> >>
> >>   #define EEPROM_I2C_TARGET_ADDR 0xA0
> >>
> >> @@ -111,7 +112,7 @@ int amdgpu_ras_eeprom_init(struct 
> >> amdgpu_ras_eeprom_control *control)
> >>
> >>  switch (adev->asic_type) {
> >>  case CHIP_VEGA20:
> >> -   /*TODO Add MI-60 */
> >> +   ret = 
> >> smu_v11_0_i2c_eeprom_control_init(&control->eeprom_accessor);
> >>  break;
> >>
> >>  default:
> >> @@ -163,7 +164,7 @@ void amdgpu_ras_eeprom_fini(struct 
> >> amdgpu_ras_eeprom_control *control)
> >>
> >>  switch (adev->asic_type) {
> >>  case CHIP_VEGA20:
> >> -   /*TODO Add MI-60 */
> >> +   
> >> smu_v11_0_i2c_eeprom_control_fini(&control->eeprom_accessor);
> >>  break;
> >>
> >>  default:
> >> diff --git a/drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.c 
> >> b/drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.c
> >> new file mode 100644
> >> index 000..24405fa
> >> --- /dev/null
> >> +++ b/drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.c
> >> @@ -0,0 +1,710 @@
> >> +/*
> >> + * Copyright 2019 Advanced Micro Devices, Inc.
> >> + *
> >> + * Permission is hereby granted, free of charge, to any person obtaining a
> >> + * copy of this software and associated documentation files (the 
> >> "Software"),
> >> + * to deal in the Software without restriction, including without 
> >> limitation
> >> + * the rights to use, copy, modify, merge, publish, distribute, 
> >> sublicense,
> >> + * and/or sell copies of the Software, and to permit persons to whom the
> >> + * Software is furnished to do so, subject to the following conditions:
> >> + *
> >> + * The above copyright notice and this permission notice shall be 
> >> included in
> >> + * all copies or substantial portions of the Software.
> >> + *
> >> + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 
> >> EXPRESS OR
> >> + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 
> >> MERCHANTABILITY,
> >> + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT 
> >> SHALL
> >> + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES 
> >> OR
> >> + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTR

Re: [PATCH 3/3] drm/amd/display: update bw_calcs to take pipe sync into account (v2)

2019-08-22 Thread Kazlauskas, Nicholas
On 8/22/19 11:36 AM, Alex Deucher wrote:
> Properly set all_displays_in_sync so that when the data is
> propagated to powerplay, it's set properly and we can enable
> mclk switching when all monitors are in sync.
> 
> v2: fix logic, clean up
> 
> Signed-off-by: Alex Deucher 
> ---
>   .../gpu/drm/amd/display/dc/calcs/dce_calcs.c  | 49 ++-
>   1 file changed, 47 insertions(+), 2 deletions(-)
> 
> diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c 
> b/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
> index 9f12e21f8b9b..8d904647fb0f 100644
> --- a/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
> +++ b/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
> @@ -25,6 +25,7 @@
>   
>   #include 
>   
> +#include "resource.h"
>   #include "dm_services.h"
>   #include "dce_calcs.h"
>   #include "dc.h"
> @@ -2977,6 +2978,50 @@ static void populate_initial_data(
>   data->number_of_displays = num_displays;
>   }
>   
> +static bool all_displays_in_sync(const struct pipe_ctx pipe[],
> +  int pipe_count,
> +  uint32_t number_of_displays)
> +{
> + const struct pipe_ctx *unsynced_pipes[MAX_PIPES] = { NULL };
> + int group_size = 1;
> + int i, j;
> +
> + for (i = 0; i < pipe_count; i++) {
> + if (!pipe[i].stream)

This bit differs from program_timing_sync, but since this is for dce and 
we don't do pipe split or MPO I think it's probably fine that you're not 
checking top_pipe here.

Wouldn't hurt to have that logic here though.

> + continue;
> +
> + unsynced_pipes[i] = &pipe[i];
> + }
> +
> + for (i = 0; i < pipe_count; i++) {
> + const struct pipe_ctx *pipe_set[MAX_PIPES];
> +
> + if (!unsynced_pipes[i])
> + continue;
> +
> + pipe_set[0] = unsynced_pipes[i];
> + unsynced_pipes[i] = NULL;
> +
> + /* Add tg to the set, search rest of the tg's for ones with
> +  * same timing, add all tgs with same timing to the group
> +  */
> + for (j = i + 1; j < pipe_count; j++) {
> + if (!unsynced_pipes[j])
> + continue;
> +
> + if (resource_are_streams_timing_synchronizable(
> + unsynced_pipes[j]->stream,
> + pipe_set[0]->stream)) {
> + pipe_set[group_size] = unsynced_pipes[j];
> + unsynced_pipes[j] = NULL;
> + group_size++;
> + }
> + }
> + }
> +
> + return (group_size == number_of_displays) ? true : false;

I think this logic is functional but it looks incorrect at first glance 
because group_size doesn't get reset. What ends up happening is the 
first pipe of each group doesn't get added to group_size.

I feel that this would be more clear as:

static bool all_displays_in_sync(const struct pipe_ctx pipe[], int 
pipe_count)
{
const struct pipe_ctx *active_pipes[MAX_PIPES];
int i, num_active_pipes = 0;

for (i = 0; i < pipe_count; i++) {
if (!pipe[i].stream || pipe[i].top_pipe)
continue;

active_pipes[num_active_pipes++] = &pipe[i];
}

if (!num_active_pipes)
return false;

for (i = 1; i < num_active_pipes; ++i)
if (!resource_are_streams_timing_synchronizable(
active_pipes[0]->stream, active_pipes[i]->stream))
return false;

return true;
}

But I haven't tested this.

Nicholas Kazlauskas


> +}
> +
>   /**
>* Return:
>*  true -  Display(s) configuration supported.
> @@ -2998,8 +3043,8 @@ bool bw_calcs(struct dc_context *ctx,
>   
>   populate_initial_data(pipe, pipe_count, data);
>   
> - /*TODO: this should be taken out calcs output and assigned during 
> timing sync for pplib use*/
> - calcs_output->all_displays_in_sync = false;
> + calcs_output->all_displays_in_sync = all_displays_in_sync(pipe, 
> pipe_count,
> +   
> data->number_of_displays);
>   
>   if (data->number_of_displays != 0) {
>   uint8_t yclk_lvl, sclk_lvl;
> 


Re: [PATCH 3/3] drm/amd/display: update bw_calcs to take pipe sync into account (v2)

2019-08-22 Thread Alex Deucher
On Thu, Aug 22, 2019 at 12:25 PM Kazlauskas, Nicholas
 wrote:
>
> On 8/22/19 11:36 AM, Alex Deucher wrote:
> > Properly set all_displays_in_sync so that when the data is
> > propagated to powerplay, it's set properly and we can enable
> > mclk switching when all monitors are in sync.
> >
> > v2: fix logic, clean up
> >
> > Signed-off-by: Alex Deucher 
> > ---
> >   .../gpu/drm/amd/display/dc/calcs/dce_calcs.c  | 49 ++-
> >   1 file changed, 47 insertions(+), 2 deletions(-)
> >
> > diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c 
> > b/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
> > index 9f12e21f8b9b..8d904647fb0f 100644
> > --- a/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
> > +++ b/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
> > @@ -25,6 +25,7 @@
> >
> >   #include 
> >
> > +#include "resource.h"
> >   #include "dm_services.h"
> >   #include "dce_calcs.h"
> >   #include "dc.h"
> > @@ -2977,6 +2978,50 @@ static void populate_initial_data(
> >   data->number_of_displays = num_displays;
> >   }
> >
> > +static bool all_displays_in_sync(const struct pipe_ctx pipe[],
> > +  int pipe_count,
> > +  uint32_t number_of_displays)
> > +{
> > + const struct pipe_ctx *unsynced_pipes[MAX_PIPES] = { NULL };
> > + int group_size = 1;
> > + int i, j;
> > +
> > + for (i = 0; i < pipe_count; i++) {
> > + if (!pipe[i].stream)
>
> This bit differs from program_timing_sync, but since this is for dce and
> we don't do pipe split or MPO I think it's probably fine that you're not
> checking top_pipe here.
>
> Wouldn't hurt to have that logic here though.
>

I had checked for top_pipe here originally, but it was always NULL so
unsynced_pipes never got populated.  Maybe that is not populated
properly at this point?

> > + continue;
> > +
> > + unsynced_pipes[i] = &pipe[i];
> > + }
> > +
> > + for (i = 0; i < pipe_count; i++) {
> > + const struct pipe_ctx *pipe_set[MAX_PIPES];
> > +
> > + if (!unsynced_pipes[i])
> > + continue;
> > +
> > + pipe_set[0] = unsynced_pipes[i];
> > + unsynced_pipes[i] = NULL;
> > +
> > + /* Add tg to the set, search rest of the tg's for ones with
> > +  * same timing, add all tgs with same timing to the group
> > +  */
> > + for (j = i + 1; j < pipe_count; j++) {
> > + if (!unsynced_pipes[j])
> > + continue;
> > +
> > + if (resource_are_streams_timing_synchronizable(
> > + unsynced_pipes[j]->stream,
> > + pipe_set[0]->stream)) {
> > + pipe_set[group_size] = unsynced_pipes[j];
> > + unsynced_pipes[j] = NULL;
> > + group_size++;
> > + }
> > + }
> > + }
> > +
> > + return (group_size == number_of_displays) ? true : false;
>
> I think this logic is functional but it looks incorrect at first glance
> because group_size doesn't get reset. What ends up happening is the
> first pipe of each group doesn't get added to group_size.
>
> I feel that this would be more clear as:
>
> static bool all_displays_in_sync(const struct pipe_ctx pipe[], int
> pipe_count)
> {
> const struct pipe_ctx *active_pipes[MAX_PIPES];
> int i, num_active_pipes = 0;
>
> for (i = 0; i < pipe_count; i++) {
> if (!pipe[i].stream || pipe[i].top_pipe)
> continue;
>
> active_pipes[num_active_pipes++] = &pipe[i];
> }
>
> if (!num_active_pipes)
> return false;
>
> for (i = 1; i < num_active_pipes; ++i)
> if (!resource_are_streams_timing_synchronizable(
> active_pipes[0]->stream, active_pipes[i]->stream))
> return false;
>
> return true;
> }

Yes, that's much cleaner.  Thanks!

Alex

>
> But I haven't tested this.
>
> Nicholas Kazlauskas
>
>
> > +}
> > +
> >   /**
> >* Return:
> >*  true -  Display(s) configuration supported.
> > @@ -2998,8 +3043,8 @@ bool bw_calcs(struct dc_context *ctx,
> >
> >   populate_initial_data(pipe, pipe_count, data);
> >
> > - /*TODO: this should be taken out calcs output and assigned during 
> > timing sync for pplib use*/
> > - calcs_output->all_displays_in_sync = false;
> > + calcs_output->all_displays_in_sync = all_displays_in_sync(pipe, 
> > pipe_count,
> > +   
> > data->number_of_displays);
> >
> >   if (data->number_of_displays != 0) {
> >   uint8_t yclk_lvl, sclk_lvl;
> >
>

Re: [PATCH 3/3] drm/amd/display: update bw_calcs to take pipe sync into account (v2)

2019-08-22 Thread Kazlauskas, Nicholas
On 8/22/19 12:31 PM, Alex Deucher wrote:
> On Thu, Aug 22, 2019 at 12:25 PM Kazlauskas, Nicholas
>  wrote:
>>
>> On 8/22/19 11:36 AM, Alex Deucher wrote:
>>> Properly set all_displays_in_sync so that when the data is
>>> propagated to powerplay, it's set properly and we can enable
>>> mclk switching when all monitors are in sync.
>>>
>>> v2: fix logic, clean up
>>>
>>> Signed-off-by: Alex Deucher 
>>> ---
>>>.../gpu/drm/amd/display/dc/calcs/dce_calcs.c  | 49 ++-
>>>1 file changed, 47 insertions(+), 2 deletions(-)
>>>
>>> diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c 
>>> b/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
>>> index 9f12e21f8b9b..8d904647fb0f 100644
>>> --- a/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
>>> +++ b/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
>>> @@ -25,6 +25,7 @@
>>>
>>>#include 
>>>
>>> +#include "resource.h"
>>>#include "dm_services.h"
>>>#include "dce_calcs.h"
>>>#include "dc.h"
>>> @@ -2977,6 +2978,50 @@ static void populate_initial_data(
>>>data->number_of_displays = num_displays;
>>>}
>>>
>>> +static bool all_displays_in_sync(const struct pipe_ctx pipe[],
>>> +  int pipe_count,
>>> +  uint32_t number_of_displays)
>>> +{
>>> + const struct pipe_ctx *unsynced_pipes[MAX_PIPES] = { NULL };
>>> + int group_size = 1;
>>> + int i, j;
>>> +
>>> + for (i = 0; i < pipe_count; i++) {
>>> + if (!pipe[i].stream)
>>
>> This bit differs from program_timing_sync, but since this is for dce and
>> we don't do pipe split or MPO I think it's probably fine that you're not
>> checking top_pipe here.
>>
>> Wouldn't hurt to have that logic here though.
>>
> 
> I had checked for top_pipe here originally, but it was always NULL so
> unsynced_pipes never got populated.  Maybe that is not populated
> properly at this point?

The presence of a top_pipe on a pipe indicates that the pipe is part of 
a blending chain. A NULL top_pipe value indicates that the current pipe 
is the top of the chain.

It should be NULL for all pipes on DCE ASICs.
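
As a rough illustration (not from the patch, helper name hypothetical),
the relationship can be pictured as a walk up the chain: following
top_pipe from any pipe_ctx ends at the pipe that owns the timing
generator for the blending chain, and on DCE that walk terminates
immediately because top_pipe is always NULL.

static const struct pipe_ctx *top_of_blending_chain(const struct pipe_ctx *pipe)
{
	/* On DCE ASICs top_pipe is always NULL, so the loop body never runs. */
	while (pipe->top_pipe)
		pipe = pipe->top_pipe;

	return pipe;
}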

Nicholas Kazlauskas

> 
>>> + continue;
>>> +
>>> + unsynced_pipes[i] = &pipe[i];
>>> + }
>>> +
>>> + for (i = 0; i < pipe_count; i++) {
>>> + const struct pipe_ctx *pipe_set[MAX_PIPES];
>>> +
>>> + if (!unsynced_pipes[i])
>>> + continue;
>>> +
>>> + pipe_set[0] = unsynced_pipes[i];
>>> + unsynced_pipes[i] = NULL;
>>> +
>>> + /* Add tg to the set, search rest of the tg's for ones with
>>> +  * same timing, add all tgs with same timing to the group
>>> +  */
>>> + for (j = i + 1; j < pipe_count; j++) {
>>> + if (!unsynced_pipes[j])
>>> + continue;
>>> +
>>> + if (resource_are_streams_timing_synchronizable(
>>> + unsynced_pipes[j]->stream,
>>> + pipe_set[0]->stream)) {
>>> + pipe_set[group_size] = unsynced_pipes[j];
>>> + unsynced_pipes[j] = NULL;
>>> + group_size++;
>>> + }
>>> + }
>>> + }
>>> +
>>> + return (group_size == number_of_displays) ? true : false;
>>
>> I think this logic is functional but it looks incorrect at first glance
>> because group_size doesn't get reset. What ends up happening is the
>> first pipe of each group doesn't get added to group_size.
>>
>> I feel that this would be more clear as:
>>
>> static bool all_displays_in_sync(const struct pipe_ctx pipe[], int
>> pipe_count)
>> {
>>  const struct pipe_ctx *active_pipes[MAX_PIPES];
>>  int i, num_active_pipes = 0;
>>
>>  for (i = 0; i < pipe_count; i++) {
>>  if (!pipe[i].stream || pipe[i].top_pipe)
>>  continue;
>>
>>  active_pipes[num_active_pipes++] = &pipe[i];
>>  }
>>
>>  if (!num_active_pipes)
>>  return false;
>>
>>  for (i = 1; i < num_active_pipes; ++i)
>>  if (!resource_are_streams_timing_synchronizable(
>>  active_pipes[0]->stream, 
>> active_pipes[i]->stream))
>>  return false;
>>
>>  return true;
>> }
> 
> Yes, that's much cleaner.  Thanks!
> 
> Alex
> 
>>
>> But I haven't tested this.
>>
>> Nicholas Kazlauskas
>>
>>
>>> +}
>>> +
>>>/**
>>> * Return:
>>> *  true -  Display(s) configuration supported.
>>> @@ -2998,8 +3043,8 @@ bool bw_calcs(struct dc_context *ctx,
>>>
>>>populate_initial_data(pipe, pipe_count, data);
>>>
>>> - /*TODO: this should be taken out calcs output and assigned during 
>>> timing sync for pplib use*/
>>> - calcs_output->all_displays_in_sync = false;
>>> + calcs_out

Re: [PATCH v4 4/4] drm/amdgpu: Vega20 SMU I2C HW engine controller.

2019-08-22 Thread Grodzovsky, Andrey

On 8/22/19 11:59 AM, Alex Deucher wrote:
> On Thu, Aug 22, 2019 at 11:35 AM Grodzovsky, Andrey
>  wrote:
>>
>> On 8/21/19 10:32 PM, Alex Deucher wrote:
>>> On Wed, Aug 21, 2019 at 4:02 PM Andrey Grodzovsky
>>>  wrote:
 Implement HW I2C enigne controller to be used by the RAS EEPROM
 table manager. This is based on code from ATITOOLs.

 v2:
 Rename the file and all function prefixes to smu_v11_0_i2c

 By Luben's observation always fill the TX fifo to full so
 we don't have garbadge interpreted by the slave as valid data.

 v3:
 Remove preemption disable as the HW I2C controller will not
 stop the clock on empty TX fifo and so it's not critical to
 keep not empty queue.
 Switch to fast mode 400 khz SCL clock for faster read and write.

 Signed-off-by: Andrey Grodzovsky 
 ---
drivers/gpu/drm/amd/amdgpu/Makefile|   5 +-
drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c |   5 +-
drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.c | 710 
 +
drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.h |  34 ++
4 files changed, 751 insertions(+), 3 deletions(-)
create mode 100644 drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.c
create mode 100644 drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.h

 diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile 
 b/drivers/gpu/drm/amd/amdgpu/Makefile
 index f016cf1..14733ff 100644
 --- a/drivers/gpu/drm/amd/amdgpu/Makefile
 +++ b/drivers/gpu/drm/amd/amdgpu/Makefile
 @@ -38,6 +38,9 @@ ccflags-y := -I$(FULL_AMD_PATH)/include/asic_reg \
   -I$(FULL_AMD_DISPLAY_PATH)/amdgpu_dm \
   -I$(FULL_AMD_PATH)/amdkfd

 +
 +
 +
>>> Drop this random whitespace change.
>>>
amdgpu-y := amdgpu_drv.o

# add KMS driver
 @@ -54,7 +57,7 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \
   amdgpu_gtt_mgr.o amdgpu_vram_mgr.o amdgpu_virt.o 
 amdgpu_atomfirmware.o \
   amdgpu_vf_error.o amdgpu_sched.o amdgpu_debugfs.o amdgpu_ids.o \
   amdgpu_gmc.o amdgpu_xgmi.o amdgpu_csa.o amdgpu_ras.o 
 amdgpu_vm_cpu.o \
 -   amdgpu_vm_sdma.o amdgpu_pmu.o amdgpu_discovery.o 
 amdgpu_ras_eeprom.o
 +   amdgpu_vm_sdma.o amdgpu_pmu.o amdgpu_discovery.o 
 amdgpu_ras_eeprom.o smu_v11_0_i2c.o

amdgpu-$(CONFIG_PERF_EVENTS) += amdgpu_pmu.o

 diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c 
 b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
 index bf07515..e6b2e17 100644
 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
 +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
 @@ -25,6 +25,7 @@
#include "amdgpu.h"
#include "amdgpu_ras.h"
#include 
 +#include "smu_v11_0_i2c.h"

#define EEPROM_I2C_TARGET_ADDR 0xA0

 @@ -111,7 +112,7 @@ int amdgpu_ras_eeprom_init(struct 
 amdgpu_ras_eeprom_control *control)

   switch (adev->asic_type) {
   case CHIP_VEGA20:
 -   /*TODO Add MI-60 */
 +   ret = 
 smu_v11_0_i2c_eeprom_control_init(&control->eeprom_accessor);
   break;

   default:
 @@ -163,7 +164,7 @@ void amdgpu_ras_eeprom_fini(struct 
 amdgpu_ras_eeprom_control *control)

   switch (adev->asic_type) {
   case CHIP_VEGA20:
 -   /*TODO Add MI-60 */
 +   
 smu_v11_0_i2c_eeprom_control_fini(&control->eeprom_accessor);
   break;

   default:
 diff --git a/drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.c 
 b/drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.c
 new file mode 100644
 index 000..24405fa
 --- /dev/null
 +++ b/drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.c
 @@ -0,0 +1,710 @@
 +/*
 + * Copyright 2019 Advanced Micro Devices, Inc.
 + *
 + * Permission is hereby granted, free of charge, to any person obtaining a
 + * copy of this software and associated documentation files (the 
 "Software"),
 + * to deal in the Software without restriction, including without 
 limitation
 + * the rights to use, copy, modify, merge, publish, distribute, 
 sublicense,
 + * and/or sell copies of the Software, and to permit persons to whom the
 + * Software is furnished to do so, subject to the following conditions:
 + *
 + * The above copyright notice and this permission notice shall be 
 included in
 + * all copies or substantial portions of the Software.
 + *
 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 
 EXPRESS OR
 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 
 MERCHANTABILITY,
 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT 
 SHALL
 + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM,

Re: [PATCH v4 4/4] drm/amdgpu: Vega20 SMU I2C HW engine controller.

2019-08-22 Thread Alex Deucher
On Thu, Aug 22, 2019 at 12:45 PM Grodzovsky, Andrey
 wrote:
>
>
> On 8/22/19 11:59 AM, Alex Deucher wrote:
> > On Thu, Aug 22, 2019 at 11:35 AM Grodzovsky, Andrey
> >  wrote:
> >>
> >> On 8/21/19 10:32 PM, Alex Deucher wrote:
> >>> On Wed, Aug 21, 2019 at 4:02 PM Andrey Grodzovsky
> >>>  wrote:
>  Implement HW I2C enigne controller to be used by the RAS EEPROM
>  table manager. This is based on code from ATITOOLs.
> 
>  v2:
>  Rename the file and all function prefixes to smu_v11_0_i2c
> 
>  By Luben's observation always fill the TX fifo to full so
>  we don't have garbadge interpreted by the slave as valid data.
> 
>  v3:
>  Remove preemption disable as the HW I2C controller will not
>  stop the clock on empty TX fifo and so it's not critical to
>  keep not empty queue.
>  Switch to fast mode 400 khz SCL clock for faster read and write.
> 
>  Signed-off-by: Andrey Grodzovsky 
>  ---
> drivers/gpu/drm/amd/amdgpu/Makefile|   5 +-
> drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c |   5 +-
> drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.c | 710 
>  +
> drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.h |  34 ++
> 4 files changed, 751 insertions(+), 3 deletions(-)
> create mode 100644 drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.c
> create mode 100644 drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.h
> 
>  diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile 
>  b/drivers/gpu/drm/amd/amdgpu/Makefile
>  index f016cf1..14733ff 100644
>  --- a/drivers/gpu/drm/amd/amdgpu/Makefile
>  +++ b/drivers/gpu/drm/amd/amdgpu/Makefile
>  @@ -38,6 +38,9 @@ ccflags-y := -I$(FULL_AMD_PATH)/include/asic_reg \
>    -I$(FULL_AMD_DISPLAY_PATH)/amdgpu_dm \
>    -I$(FULL_AMD_PATH)/amdkfd
> 
>  +
>  +
>  +
> >>> Drop this random whitespace change.
> >>>
> amdgpu-y := amdgpu_drv.o
> 
> # add KMS driver
>  @@ -54,7 +57,7 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \
>    amdgpu_gtt_mgr.o amdgpu_vram_mgr.o amdgpu_virt.o 
>  amdgpu_atomfirmware.o \
>    amdgpu_vf_error.o amdgpu_sched.o amdgpu_debugfs.o amdgpu_ids.o 
>  \
>    amdgpu_gmc.o amdgpu_xgmi.o amdgpu_csa.o amdgpu_ras.o 
>  amdgpu_vm_cpu.o \
>  -   amdgpu_vm_sdma.o amdgpu_pmu.o amdgpu_discovery.o 
>  amdgpu_ras_eeprom.o
>  +   amdgpu_vm_sdma.o amdgpu_pmu.o amdgpu_discovery.o 
>  amdgpu_ras_eeprom.o smu_v11_0_i2c.o
> 
> amdgpu-$(CONFIG_PERF_EVENTS) += amdgpu_pmu.o
> 
>  diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c 
>  b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
>  index bf07515..e6b2e17 100644
>  --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
>  +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
>  @@ -25,6 +25,7 @@
> #include "amdgpu.h"
> #include "amdgpu_ras.h"
> #include 
>  +#include "smu_v11_0_i2c.h"
> 
> #define EEPROM_I2C_TARGET_ADDR 0xA0
> 
>  @@ -111,7 +112,7 @@ int amdgpu_ras_eeprom_init(struct 
>  amdgpu_ras_eeprom_control *control)
> 
>    switch (adev->asic_type) {
>    case CHIP_VEGA20:
>  -   /*TODO Add MI-60 */
>  +   ret = 
>  smu_v11_0_i2c_eeprom_control_init(&control->eeprom_accessor);
>    break;
> 
>    default:
>  @@ -163,7 +164,7 @@ void amdgpu_ras_eeprom_fini(struct 
>  amdgpu_ras_eeprom_control *control)
> 
>    switch (adev->asic_type) {
>    case CHIP_VEGA20:
>  -   /*TODO Add MI-60 */
>  +   
>  smu_v11_0_i2c_eeprom_control_fini(&control->eeprom_accessor);
>    break;
> 
>    default:
>  diff --git a/drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.c 
>  b/drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.c
>  new file mode 100644
>  index 000..24405fa
>  --- /dev/null
>  +++ b/drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.c
>  @@ -0,0 +1,710 @@
>  +/*
>  + * Copyright 2019 Advanced Micro Devices, Inc.
>  + *
>  + * Permission is hereby granted, free of charge, to any person 
>  obtaining a
>  + * copy of this software and associated documentation files (the 
>  "Software"),
>  + * to deal in the Software without restriction, including without 
>  limitation
>  + * the rights to use, copy, modify, merge, publish, distribute, 
>  sublicense,
>  + * and/or sell copies of the Software, and to permit persons to whom the
>  + * Software is furnished to do so, subject to the following conditions:
>  + *
>  + * The above copyright notice and this permission notice shall be 
>  included in
>  + * all copies or substantial portions of the Software.
>  + *
>  + * TH

Re: [PATCH][drm-next] drm/amd/display: fix a potential null pointer dereference

2019-08-22 Thread Harry Wentland
On 2019-08-16 6:10 p.m., Colin King wrote:
> From: Colin Ian King 
> 
> Currently the pointer init_data is dereferenced on the assignment
> of fw_info before init_data is sanity checked to see if it is null.
> Fix te potential null pointer dereference on init_data by only
> performing dereference after it is null checked.
> 
> Addresses-Coverity: ("Dereference before null check")
> Fixes: 9adc8050bf3c ("drm/amd/display: make firmware info only load once 
> during dc_bios create")
> Signed-off-by: Colin Ian King 

Reviewed-by: Harry Wentland 

Harry

> ---
>  drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c | 3 ++-
>  1 file changed, 2 insertions(+), 1 deletion(-)
> 
> diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c 
> b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
> index bee81bf288be..926954c804a6 100644
> --- a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
> +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
> @@ -1235,7 +1235,7 @@ static bool calc_pll_max_vco_construct(
>   struct calc_pll_clock_source_init_data *init_data)
>  {
>   uint32_t i;
> - struct dc_firmware_info *fw_info = &init_data->bp->fw_info;
> + struct dc_firmware_info *fw_info;
>   if (calc_pll_cs == NULL ||
>   init_data == NULL ||
>   init_data->bp == NULL)
> @@ -1244,6 +1244,7 @@ static bool calc_pll_max_vco_construct(
>   if (init_data->bp->fw_info_valid)
>   return false;
>  
> + fw_info = &init_data->bp->fw_info;
>   calc_pll_cs->ctx = init_data->ctx;
>   calc_pll_cs->ref_freq_khz = fw_info->pll_info.crystal_frequency;
>   calc_pll_cs->min_vco_khz =
> 

[PATCH 1/4] drm/amdgpu/powerplay/smu7: enable mclk switching if monitors are synced

2019-08-22 Thread Alex Deucher
If DC has synced the displays, we can enable mclk switching to
save power.

Signed-off-by: Alex Deucher 
---
 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c | 7 ---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c 
b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
index 3c1084de5d59..34f95e0e3ea4 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
@@ -2956,9 +2956,10 @@ static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
 	if (hwmgr->display_config->num_display == 0)
 		disable_mclk_switching = false;
 	else
-		disable_mclk_switching = ((1 < hwmgr->display_config->num_display) ||
-					  disable_mclk_switching_for_frame_lock ||
-					  smu7_vblank_too_short(hwmgr, hwmgr->display_config->min_vblank_time));
+		disable_mclk_switching = ((1 < hwmgr->display_config->num_display) &&
+					   !hwmgr->display_config->multi_monitor_in_sync) ||
+					   disable_mclk_switching_for_frame_lock ||
+					   smu7_vblank_too_short(hwmgr, hwmgr->display_config->min_vblank_time);
 
sclk = smu7_ps->performance_levels[0].engine_clock;
mclk = smu7_ps->performance_levels[0].memory_clock;
-- 
2.20.1
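
For readability, the resulting policy can be restated as a small helper
(a sketch only, with hypothetical names; the patch itself open-codes the
condition in smu7_apply_state_adjust_rules):

static bool example_disable_mclk_switching(uint32_t num_display,
					   bool multi_monitor_in_sync,
					   bool frame_lock,
					   bool vblank_too_short)
{
	/* Keep mclk switching enabled for multiple displays as long as DC
	 * reports them as timing-synchronized.
	 */
	return (num_display > 1 && !multi_monitor_in_sync) ||
	       frame_lock || vblank_too_short;
}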


[PATCH 0/4] Support mclk switching when monitors are in sync (v2)

2019-08-22 Thread Alex Deucher
This patch set enables mclk switching with multiple monitors when all
monitors are in sync.  Normally mclk switching is not available with
multiple monitors because the vblank timing does not line up.  However,
if the timing is identical, the display driver can sync up the displays
in some cases.  Check for these cases and allow mclk switching when
possible.

Alex Deucher (4):
  drm/amdgpu/powerplay/smu7: enable mclk switching if monitors are
synced
  drm/amdgpu/powerplay/vega10: enable mclk switching if monitors are
synced
  drm/amd/display: update bw_calcs to take pipe sync into account (v3)
  drm/amdgpu/display: add flag for multi-display mclk switching

 .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c |  3 ++
 .../gpu/drm/amd/display/dc/calcs/dce_calcs.c  | 33 +--
 drivers/gpu/drm/amd/display/dc/dc.h   |  2 +-
 drivers/gpu/drm/amd/include/amd_shared.h  |  1 +
 .../gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c  |  7 ++--
 .../drm/amd/powerplay/hwmgr/vega10_hwmgr.c|  3 +-
 6 files changed, 42 insertions(+), 7 deletions(-)

-- 
2.20.1


[PATCH 4/4] drm/amdgpu/display: add flag for multi-display mclk switching

2019-08-22 Thread Alex Deucher
Add a dcfeaturemask flag for mclk switching.  Disable by default;
enable once the feature has seen more testing.

Set amdgpu.dcfeaturemask=2 on the kernel command line in grub
to enable this.

Signed-off-by: Alex Deucher 
---
 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 3 +++
 drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c  | 5 -
 drivers/gpu/drm/amd/display/dc/dc.h   | 2 +-
 drivers/gpu/drm/amd/include/amd_shared.h  | 1 +
 4 files changed, 9 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 45298269744d..cb86ccf48bd9 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -694,6 +694,9 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
if (amdgpu_dc_feature_mask & DC_FBC_MASK)
init_data.flags.fbc_support = true;
 
+   if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
+   init_data.flags.multi_mon_pp_mclk_switch = true;
+
init_data.flags.power_down_display_on_boot = true;
 
 #ifdef CONFIG_DRM_AMD_DC_DCN2_0
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c 
b/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
index eca681d9d7f5..a1d49256fab7 100644
--- a/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
@@ -3025,7 +3025,10 @@ bool bw_calcs(struct dc_context *ctx,
 
populate_initial_data(pipe, pipe_count, data);
 
-	calcs_output->all_displays_in_sync = all_displays_in_sync(pipe, pipe_count);
+	if (ctx->dc->config.multi_mon_pp_mclk_switch)
+		calcs_output->all_displays_in_sync = all_displays_in_sync(pipe, pipe_count);
+   else
+   calcs_output->all_displays_in_sync = false;
 
if (data->number_of_displays != 0) {
uint8_t yclk_lvl, sclk_lvl;
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h 
b/drivers/gpu/drm/amd/display/dc/dc.h
index 3ef269f82478..5d4a2a9228f0 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -220,7 +220,7 @@ struct dc_config {
bool power_down_display_on_boot;
bool edp_not_connected;
bool forced_clocks;
-
+   bool multi_mon_pp_mclk_switch;
 };
 
 enum visual_confirm {
diff --git a/drivers/gpu/drm/amd/include/amd_shared.h 
b/drivers/gpu/drm/amd/include/amd_shared.h
index a0a7211438f2..8889aaceec60 100644
--- a/drivers/gpu/drm/amd/include/amd_shared.h
+++ b/drivers/gpu/drm/amd/include/amd_shared.h
@@ -142,6 +142,7 @@ enum PP_FEATURE_MASK {
 
 enum DC_FEATURE_MASK {
DC_FBC_MASK = 0x1,
+   DC_MULTI_MON_PP_MCLK_SWITCH_MASK = 0x2,
 };
 
 enum amd_dpm_forced_level;
-- 
2.20.1


[PATCH 2/4] drm/amdgpu/powerplay/vega10: enable mclk switching if monitors are synced

2019-08-22 Thread Alex Deucher
If DC has synced the displays, we can enable mclk switching to
save power.

Signed-off-by: Alex Deucher 
---
 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c 
b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
index 948c54cb9c5d..d08493b67b67 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
@@ -3220,7 +3220,8 @@ static int vega10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
 	if (hwmgr->display_config->num_display == 0)
 		disable_mclk_switching = false;
 	else
-		disable_mclk_switching = (hwmgr->display_config->num_display > 1) ||
+		disable_mclk_switching = ((1 < hwmgr->display_config->num_display) &&
+					  !hwmgr->display_config->multi_monitor_in_sync) ||
 					disable_mclk_switching_for_frame_lock ||
 					disable_mclk_switching_for_vr ||
 					force_mclk_high;
-- 
2.20.1


[PATCH 3/4] drm/amd/display: update bw_calcs to take pipe sync into account (v3)

2019-08-22 Thread Alex Deucher
Properly set all_displays_in_sync so that when the data is
propagated to powerplay, it's set properly and we can enable
mclk switching when all monitors are in sync.

v2: fix logic, clean up
v3: check for blending chains, simplify logic

Signed-off-by: Alex Deucher 
---
 .../gpu/drm/amd/display/dc/calcs/dce_calcs.c  | 30 +--
 1 file changed, 28 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c 
b/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
index 9f12e21f8b9b..eca681d9d7f5 100644
--- a/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
@@ -25,6 +25,7 @@
 
 #include 
 
+#include "resource.h"
 #include "dm_services.h"
 #include "dce_calcs.h"
 #include "dc.h"
@@ -2977,6 +2978,32 @@ static void populate_initial_data(
data->number_of_displays = num_displays;
 }
 
+static bool all_displays_in_sync(const struct pipe_ctx pipe[],
+int pipe_count)
+{
+   const struct pipe_ctx *active_pipes[MAX_PIPES];
+   int i, num_active_pipes = 0;
+
+   for (i = 0; i < pipe_count; i++) {
+   if (!pipe[i].stream || pipe[i].top_pipe)
+   continue;
+
+   active_pipes[num_active_pipes++] = &pipe[i];
+   }
+
+   if (!num_active_pipes)
+   return false;
+
+   for (i = 1; i < num_active_pipes; ++i) {
+   if (!resource_are_streams_timing_synchronizable(
+   active_pipes[0]->stream, active_pipes[i]->stream)) {
+   return false;
+   }
+   }
+
+   return true;
+}
+
 /**
  * Return:
  * true -  Display(s) configuration supported.
@@ -2998,8 +3025,7 @@ bool bw_calcs(struct dc_context *ctx,
 
populate_initial_data(pipe, pipe_count, data);
 
-	/*TODO: this should be taken out calcs output and assigned during timing sync for pplib use*/
-	calcs_output->all_displays_in_sync = false;
+	calcs_output->all_displays_in_sync = all_displays_in_sync(pipe, pipe_count);
 
if (data->number_of_displays != 0) {
uint8_t yclk_lvl, sclk_lvl;
-- 
2.20.1


Re: [PATCH 4/4] drm/amdgpu/display: add flag for multi-display mclk switching

2019-08-22 Thread Kazlauskas, Nicholas
On 8/22/19 3:30 PM, Alex Deucher wrote:
> Add a dcfeaturemask flag for mclk switching.  Disable by default;
> enable once the feature has seen more testing.
> 
> Set amdgpu.dcfeaturemask=2 on the kernel command line in grub
> to enable this.
> 
> Signed-off-by: Alex Deucher 

Patches 3 and 4 are

Reviewed-by: Nicholas Kazlauskas 

> ---
>   drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 3 +++
>   drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c  | 5 -
>   drivers/gpu/drm/amd/display/dc/dc.h   | 2 +-
>   drivers/gpu/drm/amd/include/amd_shared.h  | 1 +
>   4 files changed, 9 insertions(+), 2 deletions(-)
> 
> diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c 
> b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
> index 45298269744d..cb86ccf48bd9 100644
> --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
> +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
> @@ -694,6 +694,9 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
>   if (amdgpu_dc_feature_mask & DC_FBC_MASK)
>   init_data.flags.fbc_support = true;
>   
> + if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
> + init_data.flags.multi_mon_pp_mclk_switch = true;
> +
>   init_data.flags.power_down_display_on_boot = true;
>   
>   #ifdef CONFIG_DRM_AMD_DC_DCN2_0
> diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c 
> b/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
> index eca681d9d7f5..a1d49256fab7 100644
> --- a/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
> +++ b/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
> @@ -3025,7 +3025,10 @@ bool bw_calcs(struct dc_context *ctx,
>   
>   populate_initial_data(pipe, pipe_count, data);
>   
> - calcs_output->all_displays_in_sync = all_displays_in_sync(pipe, 
> pipe_count);
> + if (ctx->dc->config.multi_mon_pp_mclk_switch)
> + calcs_output->all_displays_in_sync = all_displays_in_sync(pipe, 
> pipe_count);
> + else
> + calcs_output->all_displays_in_sync = false;
>   
>   if (data->number_of_displays != 0) {
>   uint8_t yclk_lvl, sclk_lvl;
> diff --git a/drivers/gpu/drm/amd/display/dc/dc.h 
> b/drivers/gpu/drm/amd/display/dc/dc.h
> index 3ef269f82478..5d4a2a9228f0 100644
> --- a/drivers/gpu/drm/amd/display/dc/dc.h
> +++ b/drivers/gpu/drm/amd/display/dc/dc.h
> @@ -220,7 +220,7 @@ struct dc_config {
>   bool power_down_display_on_boot;
>   bool edp_not_connected;
>   bool forced_clocks;
> -
> + bool multi_mon_pp_mclk_switch;
>   };
>   
>   enum visual_confirm {
> diff --git a/drivers/gpu/drm/amd/include/amd_shared.h 
> b/drivers/gpu/drm/amd/include/amd_shared.h
> index a0a7211438f2..8889aaceec60 100644
> --- a/drivers/gpu/drm/amd/include/amd_shared.h
> +++ b/drivers/gpu/drm/amd/include/amd_shared.h
> @@ -142,6 +142,7 @@ enum PP_FEATURE_MASK {
>   
>   enum DC_FEATURE_MASK {
>   DC_FBC_MASK = 0x1,
> + DC_MULTI_MON_PP_MCLK_SWITCH_MASK = 0x2,
>   };
>   
>   enum amd_dpm_forced_level;
> 


Re: [PATCH] drm/amdgpu/powerplay: remove redundant assignment to variable baco_state

2019-08-22 Thread Alex Deucher
On Thu, Aug 22, 2019 at 9:09 AM Colin King  wrote:
>
> From: Colin Ian King 
>
> Variable baco_state is initialized to a value that is never read and it is
> re-assigned later. The initialization is redundant and can be removed.
>
> Addresses-Coverity: ("Unused Value")
> Signed-off-by: Colin Ian King 

Applied.  Thanks!

Alex

> ---
>  drivers/gpu/drm/amd/powerplay/smu_v11_0.c | 2 +-
>  1 file changed, 1 insertion(+), 1 deletion(-)
>
> diff --git a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c 
> b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
> index 89749b1d2019..a4aba8576900 100644
> --- a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
> +++ b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
> @@ -1656,7 +1656,7 @@ static bool smu_v11_0_baco_is_support(struct 
> smu_context *smu)
>  static enum smu_baco_state smu_v11_0_baco_get_state(struct smu_context *smu)
>  {
> struct smu_baco_context *smu_baco = &smu->smu_baco;
> -   enum smu_baco_state baco_state = SMU_BACO_STATE_EXIT;
> +   enum smu_baco_state baco_state;
>
> mutex_lock(&smu_baco->mutex);
> baco_state = smu_baco->state;
> --
> 2.20.1
>


Re: [PATCH][drm-next] drm/amd/display: fix a potential null pointer dereference

2019-08-22 Thread Alex Deucher
On Thu, Aug 22, 2019 at 3:21 PM Harry Wentland  wrote:
>
> On 2019-08-16 6:10 p.m., Colin King wrote:
> > From: Colin Ian King 
> >
> > Currently the pointer init_data is dereferenced on the assignment
> > of fw_info before init_data is sanity checked to see if it is null.
> > Fix te potential null pointer dereference on init_data by only
> > performing dereference after it is null checked.
> >
> > Addresses-Coverity: ("Dereference before null check")
> > Fixes: 9adc8050bf3c ("drm/amd/display: make firmware info only load once 
> > during dc_bios create")
> > Signed-off-by: Colin Ian King 
>
> Reviewed-by: Harry Wentland 
>

Applied.  Thanks!

Alex

> Harry
>
> > ---
> >  drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c | 3 ++-
> >  1 file changed, 2 insertions(+), 1 deletion(-)
> >
> > diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c 
> > b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
> > index bee81bf288be..926954c804a6 100644
> > --- a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
> > +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
> > @@ -1235,7 +1235,7 @@ static bool calc_pll_max_vco_construct(
> >   struct calc_pll_clock_source_init_data *init_data)
> >  {
> >   uint32_t i;
> > - struct dc_firmware_info *fw_info = &init_data->bp->fw_info;
> > + struct dc_firmware_info *fw_info;
> >   if (calc_pll_cs == NULL ||
> >   init_data == NULL ||
> >   init_data->bp == NULL)
> > @@ -1244,6 +1244,7 @@ static bool calc_pll_max_vco_construct(
> >   if (init_data->bp->fw_info_valid)
> >   return false;
> >
> > + fw_info = &init_data->bp->fw_info;
> >   calc_pll_cs->ctx = init_data->ctx;
> >   calc_pll_cs->ref_freq_khz = fw_info->pll_info.crystal_frequency;
> >   calc_pll_cs->min_vco_khz =
> >


[PATCH] drm/drm_connector: add additional aspect ratio values

2019-08-22 Thread Wayne Lin
HDMI 2.0 CTS item HF1-35 verifies that the source generates video
timings with the "64:27" picture aspect ratio correctly.

e.g. (VIC 76) 1920x1080p@60Hz, 24bpp

This patch adds "64:27" and "256:135" to drm_aspect_ratio_enum_list.
Thereafter, one can set the aspect ratio to "64:27" or "256:135"
after creating the aspect ratio property.

Change-Id: Ifc9df54e8e8f78e70960fcd737a3a57e49c81152
Signed-off-by: Wayne Lin 
---
 drivers/gpu/drm/drm_connector.c | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c
index 3a0cacb71235..c0629a01d08e 100644
--- a/drivers/gpu/drm/drm_connector.c
+++ b/drivers/gpu/drm/drm_connector.c
@@ -772,6 +772,8 @@ static const struct drm_prop_enum_list 
drm_aspect_ratio_enum_list[] = {
{ DRM_MODE_PICTURE_ASPECT_NONE, "Automatic" },
{ DRM_MODE_PICTURE_ASPECT_4_3, "4:3" },
{ DRM_MODE_PICTURE_ASPECT_16_9, "16:9" },
+   { DRM_MODE_PICTURE_ASPECT_64_27, "64:27" },
+   { DRM_MODE_PICTURE_ASPECT_256_135, "256:135" },
 };
 
 static const struct drm_prop_enum_list drm_content_type_enum_list[] = {
-- 
2.17.1
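
As context for how the new values become selectable (a sketch, not part
of this patch, assuming the standard aspect-ratio property helpers; the
function name is made up):

static void example_attach_aspect_ratio(struct drm_connector *connector)
{
	struct drm_device *dev = connector->dev;

	/* Create the enum property once, then expose it on the connector;
	 * after this patch the enum also offers "64:27" and "256:135".
	 */
	if (!drm_mode_create_aspect_ratio_property(dev))
		drm_object_attach_property(&connector->base,
					   dev->mode_config.aspect_ratio_property,
					   DRM_MODE_PICTURE_ASPECT_NONE);
}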


[PATCH] drm: Add LTTPR defines for DP 1.4

2019-08-22 Thread Siqueira, Rodrigo
The DP 1.4 specification defines the Link Training Tunable PHY Repeater
(LTTPR), which is required to support systems with Thunderbolt or other
repeater devices.

Cc: Abdoulaye Berthe 
Cc: Harry Wentland 
Cc: Leo Li 
Signed-off-by: Rodrigo Siqueira 
Signed-off-by: Abdoulaye Berthe 
---
 include/drm/drm_dp_helper.h | 26 ++
 1 file changed, 26 insertions(+)

diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h
index 8364502f92cf..8336d960da7f 100644
--- a/include/drm/drm_dp_helper.h
+++ b/include/drm/drm_dp_helper.h
@@ -134,6 +134,32 @@
 #define DP_SUPPORTED_LINK_RATES		    0x010 /* eDP 1.4 */
 # define DP_MAX_SUPPORTED_RATES		     8	    /* 16-bit little-endian */
 
+/** Link Training (LT)-tunable Physical Repeaters - DP 1.4 **/
+#define DP_LTTPR_REV					0xf0000
+#define DP_LTTPR_MAX_LINK_RATE				0xf0001
+#define DP_LTTPR_REPEATER_CNT				0xf0002
+#define DP_LTTPR_REPEATER_MODE				0xf0003
+#define DP_LTTPR_MAX_LANE_CNT				0xf0004
+#define DP_LTTPR_EXTENDED_WAIT_TIMEOUT			0xf0005
+#define DP_LTTPR_TRAINING_PATTERN_SET_REPEATER1   0xf0010
+#define DP_LTTPR_TRAINING_LANE0_SET_REPEATER1 0xf0011
+#define DP_LTTPR_TRAINING_LANE1_SET_REPEATER1 0xf0012
+#define DP_LTTPR_TRAINING_LANE2_SET_REPEATER1 0xf0013
+#define DP_LTTPR_TRAINING_LANE3_SET_REPEATER1 0xf0014
+#define DP_LTTPR_TRAINING_AUX_RD_INTERVAL_REPEATER1   0xf0020
+#define DP_LTTPR_TRANSMITTER_CAPABILITY_REPEATER1 0xf0021
+#define DP_LTTPR_LANE0_1_STATUS_REPEATER1 0xf0030
+#define DP_LTTPR_LANE2_3_STATUS_REPEATER1 0xf0031
+#define DP_LTTPR_LANE_ALIGN_STATUS_UPDATED_REPEATER1  0xf0032
+#define DP_LTTPR_ADJUST_REQUEST_LANE0_1_REPEATER1 0xf0033
+#define DP_LTTPR_ADJUST_REQUEST_LANE2_3_REPEATER1 0xf0034
+#define DP_LTTPR_SYMBOL_ERROR_COUNT_LANE0_REPEATER1   0xf0035
+#define DP_LTTPR_SYMBOL_ERROR_COUNT_LANE1_REPEATER1   0xf0037
+#define DP_LTTPR_SYMBOL_ERROR_COUNT_LANE2_REPEATER1   0xf0039
+#define DP_LTTPR_SYMBOL_ERROR_COUNT_LANE3_REPEATER1   0xf003b
+#define DP_REPEATER_CONFIGURATION_AND_STATUS_OFFSET   0x50
+#define DP_LTTPR_FEC_STATUS_REPEATER1 0xf0290
+
 /* Multiple stream transport */
 #define DP_FAUX_CAP					0x020   /* 1.2 */
 # define DP_FAUX_CAP_1 (1 << 0)
-- 
2.23.0
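
As one example of how the new defines could be used (a sketch, not part
of this patch; the helper name is made up), a driver can probe for a
repeater by reading the capability field over the AUX channel:

static bool example_lttpr_present(struct drm_dp_aux *aux)
{
	u8 rev = 0;

	/* A non-zero LTTPR revision indicates a PHY repeater is present. */
	if (drm_dp_dpcd_readb(aux, DP_LTTPR_REV, &rev) != 1)
		return false;

	return rev != 0;
}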



RE: [PATCH v4 1/4] drm/amdgpu: Add RAS EEPROM table.

2019-08-22 Thread Zhou1, Tao


From: Grodzovsky, Andrey  
Sent: 22 August 2019 23:07
To: Zhou1, Tao ; amd-gfx@lists.freedesktop.org
Cc: Deucher, Alexander ; Pan, Xinhui 
; Zhang, Hawking ; Tuikov, Luben 
; Lazar, Lijo ; Quan, Evan 
; Panariti, David ; Russell, Kent 

Subject: Re: [PATCH v4 1/4] drm/amdgpu: Add RAS EEPROM table.


On 8/22/19 12:13 AM, Zhou1, Tao wrote:


-Original Message-
From: Andrey Grodzovsky andrey.grodzov...@amd.com
Sent: 22 August 2019 4:02
To: amd-gfx@lists.freedesktop.org
Cc: Deucher, Alexander alexander.deuc...@amd.com; Pan, Xinhui xinhui@amd.com;
Zhang, Hawking hawking.zh...@amd.com; Tuikov, Luben luben.tui...@amd.com;
Lazar, Lijo lijo.la...@amd.com; Quan, Evan evan.q...@amd.com;
Panariti, David david.panar...@amd.com; Russell, Kent kent.russ...@amd.com;
Zhou1, Tao tao.zh...@amd.com; Grodzovsky, Andrey andrey.grodzov...@amd.com
Subject: [PATCH v4 1/4] drm/amdgpu: Add RAS EEPROM table.

Add RAS EEPROM table manager to eanble RAS errors to be stored upon
appearance and retrived on driver load.

v2: Fix some prints.

v3:
Fix checksum calculation.
Make table record and header structs packed to do correct byte value sum.
Fix record crossing EEPROM page boundry.

v4:
Fix byte sum val calculation for record - look at sizeof(record).
Fix some style comments.

Signed-off-by: Andrey Grodzovsky andrey.grodzov...@amd.com
---
 drivers/gpu/drm/amd/amdgpu/Makefile|   2 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h|   3 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c | 482
+
drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h |  90 +
 4 files changed, 576 insertions(+), 1 deletion(-)  create mode 100644
drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
 create mode 100644
drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h

diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile
b/drivers/gpu/drm/amd/amdgpu/Makefile
index 28d76bd..f016cf1 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -54,7 +54,7 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \
amdgpu_gtt_mgr.o amdgpu_vram_mgr.o amdgpu_virt.o
amdgpu_atomfirmware.o \
amdgpu_vf_error.o amdgpu_sched.o amdgpu_debugfs.o
amdgpu_ids.o \
amdgpu_gmc.o amdgpu_xgmi.o amdgpu_csa.o amdgpu_ras.o
amdgpu_vm_cpu.o \
-   amdgpu_vm_sdma.o amdgpu_discovery.o
+   amdgpu_vm_sdma.o amdgpu_pmu.o amdgpu_discovery.o
amdgpu_ras_eeprom.o

 amdgpu-$(CONFIG_PERF_EVENTS) += amdgpu_pmu.o

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
index 2765f2d..8d5bcd8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
@@ -29,6 +29,7 @@
 #include "amdgpu.h"
 #include "amdgpu_psp.h"
 #include "ta_ras_if.h"
+#include "amdgpu_ras_eeprom.h"

 enum amdgpu_ras_block {
AMDGPU_RAS_BLOCK__UMC = 0,
@@ -333,6 +334,8 @@ struct amdgpu_ras {
struct mutex recovery_lock;

uint32_t flags;
+
+   struct amdgpu_ras_eeprom_control eeprom_control;
 };

 struct ras_fs_data {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
new file mode 100644
index 000..bf07515
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
@@ -0,0 +1,482 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person
+obtaining a
+ * copy of this software and associated documentation files (the
+"Software"),
+ * to deal in the Software without restriction, including without
+limitation
+ * the rights to use, copy, modify, merge, publish, distribute,
+sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom
+the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO
EVENT
+SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM,
+DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR
THE USE
+OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "amdgpu_ras_eeprom.h"
+#include "amdgpu.h"
+#include "amdgpu_ras.h"
+#include 
+
+#define EEPROM_I2C_TARGET_ADDR 0xA0
+
+#define EEPROM_TABLE_HEADER_SIZE 20
+#define EEPROM_TABLE_RECORD_SIZE 24
[Tao] should we replace fixed value with sizeof for the two macros?

No, as I already explained before the EEPROM_TABLE_HEADER/RECORD_SIZE represent 
the actual size in bytes that those entities occupy in the EEPROM memory and as 
defined in the EEPROM table descripti
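
A compile-time cross-check is one way to tie the two views together; a
sketch, assuming the packed table header struct from this patch is
named amdgpu_ras_eeprom_table_header (adjust the name if it differs):

	/* e.g. at the top of amdgpu_ras_eeprom_init(): the macro describes
	 * the on-EEPROM layout, sizeof() the packed in-memory mirror of it.
	 */
	BUILD_BUG_ON(sizeof(struct amdgpu_ras_eeprom_table_header) !=
		     EEPROM_TABLE_HEADER_SIZE);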

Re: [PATCH v4 1/4] drm/amdgpu: Add RAS EEPROM table.

2019-08-22 Thread Grodzovsky, Andrey
Sure, will add clarification comment.

Andrey


From: Zhou1, Tao 
Sent: 22 August 2019 22:16:56
To: Grodzovsky, Andrey; amd-gfx@lists.freedesktop.org
Cc: Deucher, Alexander; Pan, Xinhui; Zhang, Hawking; Tuikov, Luben; Lazar, 
Lijo; Quan, Evan; Panariti, David; Russell, Kent
Subject: RE: [PATCH v4 1/4] drm/amdgpu: Add RAS EEPROM table.



From: Grodzovsky, Andrey 
Sent: 22 August 2019 23:07
To: Zhou1, Tao ; amd-gfx@lists.freedesktop.org
Cc: Deucher, Alexander ; Pan, Xinhui 
; Zhang, Hawking ; Tuikov, Luben 
; Lazar, Lijo ; Quan, Evan 
; Panariti, David ; Russell, Kent 

Subject: Re: [PATCH v4 1/4] drm/amdgpu: Add RAS EEPROM table.


On 8/22/19 12:13 AM, Zhou1, Tao wrote:


-Original Message-
From: Andrey Grodzovsky andrey.grodzov...@amd.com
Sent: 22 August 2019 4:02
To: amd-gfx@lists.freedesktop.org
Cc: Deucher, Alexander alexander.deuc...@amd.com; Pan, Xinhui xinhui@amd.com;
Zhang, Hawking hawking.zh...@amd.com; Tuikov, Luben luben.tui...@amd.com;
Lazar, Lijo lijo.la...@amd.com; Quan, Evan evan.q...@amd.com;
Panariti, David david.panar...@amd.com; Russell, Kent kent.russ...@amd.com;
Zhou1, Tao tao.zh...@amd.com; Grodzovsky, Andrey andrey.grodzov...@amd.com
Subject: [PATCH v4 1/4] drm/amdgpu: Add RAS EEPROM table.

Add RAS EEPROM table manager to eanble RAS errors to be stored upon
appearance and retrived on driver load.

v2: Fix some prints.

v3:
Fix checksum calculation.
Make table record and header structs packed to do correct byte value sum.
Fix record crossing EEPROM page boundry.

v4:
Fix byte sum val calculation for record - look at sizeof(record).
Fix some style comments.

Signed-off-by: Andrey Grodzovsky andrey.grodzov...@amd.com
---
 drivers/gpu/drm/amd/amdgpu/Makefile|   2 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h|   3 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c | 482
+
drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h |  90 +
 4 files changed, 576 insertions(+), 1 deletion(-)  create mode 100644
drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
 create mode 100644
drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h

diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile
b/drivers/gpu/drm/amd/amdgpu/Makefile
index 28d76bd..f016cf1 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -54,7 +54,7 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \
amdgpu_gtt_mgr.o amdgpu_vram_mgr.o amdgpu_virt.o
amdgpu_atomfirmware.o \
amdgpu_vf_error.o amdgpu_sched.o amdgpu_debugfs.o
amdgpu_ids.o \
amdgpu_gmc.o amdgpu_xgmi.o amdgpu_csa.o amdgpu_ras.o
amdgpu_vm_cpu.o \
-   amdgpu_vm_sdma.o amdgpu_discovery.o
+   amdgpu_vm_sdma.o amdgpu_pmu.o amdgpu_discovery.o
amdgpu_ras_eeprom.o

 amdgpu-$(CONFIG_PERF_EVENTS) += amdgpu_pmu.o

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
index 2765f2d..8d5bcd8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
@@ -29,6 +29,7 @@
 #include "amdgpu.h"
 #include "amdgpu_psp.h"
 #include "ta_ras_if.h"
+#include "amdgpu_ras_eeprom.h"

 enum amdgpu_ras_block {
AMDGPU_RAS_BLOCK__UMC = 0,
@@ -333,6 +334,8 @@ struct amdgpu_ras {
struct mutex recovery_lock;

uint32_t flags;
+
+   struct amdgpu_ras_eeprom_control eeprom_control;
 };

 struct ras_fs_data {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
new file mode 100644
index 000..bf07515
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
@@ -0,0 +1,482 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person
+obtaining a
+ * copy of this software and associated documentation files (the
+"Software"),
+ * to deal in the Software without restriction, including without
+limitation
+ * the rights to use, copy, modify, merge, publish, distribute,
+sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom
+the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO
EVENT
+SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM,
+DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR
THE USE
+OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "amdgpu_ras_eeprom.h"
+#include "amdgpu.h"
+#include "amdgpu_ras.h"
+#include 
+
+#de

[PATCH] drm/amdgpu: introduce vram lost parameter for reset function

2019-08-22 Thread Monk Liu
For SOC15/Vega10 the BACO reset can cause VRAM loss in the high-end
address range, and the current KMD VRAM-lost check cannot catch it
since it only checks the visible frame buffer.

TODO:
confirm whether mode1/2 reset also causes VRAM loss

Signed-off-by: Monk Liu 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu.h|  4 ++--
 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 12 +++-
 drivers/gpu/drm/amd/amdgpu/cik.c   |  2 +-
 drivers/gpu/drm/amd/amdgpu/nv.c| 10 +++---
 drivers/gpu/drm/amd/amdgpu/si.c|  2 +-
 drivers/gpu/drm/amd/amdgpu/soc15.c |  4 +++-
 drivers/gpu/drm/amd/amdgpu/vi.c|  2 +-
 7 files changed, 22 insertions(+), 14 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index f6ae565..1fe3756 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -552,7 +552,7 @@ struct amdgpu_asic_funcs {
int (*read_register)(struct amdgpu_device *adev, u32 se_num,
 u32 sh_num, u32 reg_offset, u32 *value);
void (*set_vga_state)(struct amdgpu_device *adev, bool state);
-   int (*reset)(struct amdgpu_device *adev);
+   int (*reset)(struct amdgpu_device *adev, bool *lost);
enum amd_reset_method (*reset_method)(struct amdgpu_device *adev);
/* get the reference clock */
u32 (*get_xclk)(struct amdgpu_device *adev);
@@ -1136,7 +1136,7 @@ int emu_soc_asic_init(struct amdgpu_device *adev);
  * ASICs macro.
  */
 #define amdgpu_asic_set_vga_state(adev, state) 
(adev)->asic_funcs->set_vga_state((adev), (state))
-#define amdgpu_asic_reset(adev) (adev)->asic_funcs->reset((adev))
+#define amdgpu_asic_reset(adev, lost) (adev)->asic_funcs->reset((adev), (lost))
 #define amdgpu_asic_reset_method(adev) (adev)->asic_funcs->reset_method((adev))
 #define amdgpu_asic_get_xclk(adev) (adev)->asic_funcs->get_xclk((adev))
 #define amdgpu_asic_set_uvd_clocks(adev, v, d) 
(adev)->asic_funcs->set_uvd_clocks((adev), (v), (d))
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 02b3e7d..8668cb8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -2546,7 +2546,7 @@ static void amdgpu_device_xgmi_reset_func(struct 
work_struct *__work)
struct amdgpu_device *adev =
container_of(__work, struct amdgpu_device, xgmi_reset_work);
 
-   adev->asic_reset_res =  amdgpu_asic_reset(adev);
+   adev->asic_reset_res =  amdgpu_asic_reset(adev, NULL);
if (adev->asic_reset_res)
DRM_WARN("ASIC reset failed with error, %d for drm dev, %s",
 adev->asic_reset_res, adev->ddev->unique);
@@ -2751,7 +2751,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 *  E.g., driver was not cleanly unloaded previously, etc.
 */
if (!amdgpu_sriov_vf(adev) && amdgpu_asic_need_reset_on_init(adev)) {
-   r = amdgpu_asic_reset(adev);
+   r = amdgpu_asic_reset(adev, NULL);
if (r) {
dev_err(adev->dev, "asic reset on init failed\n");
goto failed;
@@ -3084,7 +3084,7 @@ int amdgpu_device_suspend(struct drm_device *dev, bool 
suspend, bool fbcon)
pci_disable_device(dev->pdev);
pci_set_power_state(dev->pdev, PCI_D3hot);
} else {
-   r = amdgpu_asic_reset(adev);
+   r = amdgpu_asic_reset(adev, NULL);
if (r)
DRM_ERROR("amdgpu asic reset failed\n");
}
@@ -3604,7 +3604,7 @@ static int amdgpu_do_asic_reset(struct amdgpu_hive_info 
*hive,
if (!queue_work(system_highpri_wq, 
&tmp_adev->xgmi_reset_work))
r = -EALREADY;
} else
-   r = amdgpu_asic_reset(tmp_adev);
+   r = amdgpu_asic_reset(tmp_adev, &vram_lost);
 
if (r) {
DRM_ERROR("ASIC reset failed with error, %d for 
drm dev, %s",
@@ -3645,7 +3645,9 @@ static int amdgpu_do_asic_reset(struct amdgpu_hive_info 
*hive,
if (r)
goto out;
 
-   vram_lost = 
amdgpu_device_check_vram_lost(tmp_adev);
+   if (!vram_lost)
+   vram_lost = 
amdgpu_device_check_vram_lost(tmp_adev);
+
if (vram_lost) {
DRM_INFO("VRAM is lost due to GPU 
reset!\n");

atomic_inc(&tmp_adev->vram_lost_counter);
diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c
index 7b63d7a..0f25b82 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik.c
+++ b/drivers/gpu

RE: [PATCH 4/4] drm/amdgpu/display: add flag for multi-display mclk switching

2019-08-22 Thread Quan, Evan
Patches 1, 2 are reviewed-by: Evan Quan 
Patches 3, 4 are acked-by: Evan Quan 

> -Original Message-
> From: amd-gfx  On Behalf Of Alex
> Deucher
> Sent: Friday, August 23, 2019 3:31 AM
> To: amd-gfx@lists.freedesktop.org
> Cc: Deucher, Alexander 
> Subject: [PATCH 4/4] drm/amdgpu/display: add flag for multi-display mclk
> switching
> 
> Add a dcfeaturemask flag for mclk switching.  Disable by default; enable once
> the feature has seen more testing.
> 
> Set amdgpu.dcfeaturemask=2 on the kernel command line in grub to enable
> this.
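
(As a side note, dcfeaturemask is an ordinary amdgpu module parameter, so the
same effect can usually be had without editing grub, e.g. with a line like
"options amdgpu dcfeaturemask=0x2" in a file under /etc/modprobe.d/; 0x2 is
the DC_MULTI_MON_PP_MCLK_SWITCH_MASK value added below.)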
> 
> Signed-off-by: Alex Deucher 
> ---
>  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 3 +++
> drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c  | 5 -
>  drivers/gpu/drm/amd/display/dc/dc.h   | 2 +-
>  drivers/gpu/drm/amd/include/amd_shared.h  | 1 +
>  4 files changed, 9 insertions(+), 2 deletions(-)
> 
> diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
> b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
> index 45298269744d..cb86ccf48bd9 100644
> --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
> +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
> @@ -694,6 +694,9 @@ static int amdgpu_dm_init(struct amdgpu_device
> *adev)
>   if (amdgpu_dc_feature_mask & DC_FBC_MASK)
>   init_data.flags.fbc_support = true;
> 
> + if (amdgpu_dc_feature_mask &
> DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
> + init_data.flags.multi_mon_pp_mclk_switch = true;
> +
>   init_data.flags.power_down_display_on_boot = true;
> 
>  #ifdef CONFIG_DRM_AMD_DC_DCN2_0
> diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
> b/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
> index eca681d9d7f5..a1d49256fab7 100644
> --- a/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
> +++ b/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
> @@ -3025,7 +3025,10 @@ bool bw_calcs(struct dc_context *ctx,
> 
>   populate_initial_data(pipe, pipe_count, data);
> 
> - calcs_output->all_displays_in_sync = all_displays_in_sync(pipe,
> pipe_count);
> + if (ctx->dc->config.multi_mon_pp_mclk_switch)
> + calcs_output->all_displays_in_sync =
> all_displays_in_sync(pipe, pipe_count);
> + else
> + calcs_output->all_displays_in_sync = false;
> 
>   if (data->number_of_displays != 0) {
>   uint8_t yclk_lvl, sclk_lvl;
> diff --git a/drivers/gpu/drm/amd/display/dc/dc.h
> b/drivers/gpu/drm/amd/display/dc/dc.h
> index 3ef269f82478..5d4a2a9228f0 100644
> --- a/drivers/gpu/drm/amd/display/dc/dc.h
> +++ b/drivers/gpu/drm/amd/display/dc/dc.h
> @@ -220,7 +220,7 @@ struct dc_config {
>   bool power_down_display_on_boot;
>   bool edp_not_connected;
>   bool forced_clocks;
> -
> + bool multi_mon_pp_mclk_switch;
>  };
> 
>  enum visual_confirm {
> diff --git a/drivers/gpu/drm/amd/include/amd_shared.h
> b/drivers/gpu/drm/amd/include/amd_shared.h
> index a0a7211438f2..8889aaceec60 100644
> --- a/drivers/gpu/drm/amd/include/amd_shared.h
> +++ b/drivers/gpu/drm/amd/include/amd_shared.h
> @@ -142,6 +142,7 @@ enum PP_FEATURE_MASK {
> 
>  enum DC_FEATURE_MASK {
>   DC_FBC_MASK = 0x1,
> + DC_MULTI_MON_PP_MCLK_SWITCH_MASK = 0x2,
>  };
> 
>  enum amd_dpm_forced_level;
> --
> 2.20.1
> 
> ___
> amd-gfx mailing list
> amd-gfx@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/amd-gfx
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

Re: [PATCH v4 1/4] drm/amdgpu: Add RAS EEPROM table.

2019-08-22 Thread Tuikov, Luben
Hi Andrey,

Looks good--thanks for addressing my comments dated 2019-08-07.

Regards,
Luben

On 2019-08-21 16:01, Andrey Grodzovsky wrote:
> Add RAS EEPROM table manager to enable RAS errors to be stored
> upon appearance and retrieved on driver load.
> 
> v2: Fix some prints.
> 
> v3:
> Fix checksum calculation.
> Make table record and header structs packed to do correct byte value sum.
> Fix record crossing EEPROM page boundary.
> 
> v4:
> Fix byte sum val calculation for record - look at sizeof(record).
> Fix some style comments.
> 
> Signed-off-by: Andrey Grodzovsky 
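
A minimal illustration of why the header and record structs need to be packed
for a byte-value sum (a generic sketch under that assumption, not the exact
checksum code of this patch; the record struct name is assumed here):

    /* Sum every byte that will actually be written to the EEPROM.
     * sizeof(*rec) only equals the on-EEPROM size when the struct is
     * packed, i.e. has no compiler-inserted padding between fields.
     */
    static uint32_t record_byte_sum(const struct eeprom_table_record *rec)
    {
        const unsigned char *p = (const unsigned char *)rec;
        uint32_t sum = 0;
        size_t i;

        for (i = 0; i < sizeof(*rec); i++)
            sum += p[i];

        return sum;
    }
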
> ---
>  drivers/gpu/drm/amd/amdgpu/Makefile|   2 +-
>  drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h|   3 +
>  drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c | 482 
> +
>  drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h |  90 +
>  4 files changed, 576 insertions(+), 1 deletion(-)
>  create mode 100644 drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
>  create mode 100644 drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h
> 
> diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile 
> b/drivers/gpu/drm/amd/amdgpu/Makefile
> index 28d76bd..f016cf1 100644
> --- a/drivers/gpu/drm/amd/amdgpu/Makefile
> +++ b/drivers/gpu/drm/amd/amdgpu/Makefile
> @@ -54,7 +54,7 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \
>   amdgpu_gtt_mgr.o amdgpu_vram_mgr.o amdgpu_virt.o amdgpu_atomfirmware.o \
>   amdgpu_vf_error.o amdgpu_sched.o amdgpu_debugfs.o amdgpu_ids.o \
>   amdgpu_gmc.o amdgpu_xgmi.o amdgpu_csa.o amdgpu_ras.o amdgpu_vm_cpu.o \
> - amdgpu_vm_sdma.o amdgpu_discovery.o
> + amdgpu_vm_sdma.o amdgpu_pmu.o amdgpu_discovery.o amdgpu_ras_eeprom.o
>  
>  amdgpu-$(CONFIG_PERF_EVENTS) += amdgpu_pmu.o
>  
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h 
> b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
> index 2765f2d..8d5bcd8 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
> @@ -29,6 +29,7 @@
>  #include "amdgpu.h"
>  #include "amdgpu_psp.h"
>  #include "ta_ras_if.h"
> +#include "amdgpu_ras_eeprom.h"
>  
>  enum amdgpu_ras_block {
>   AMDGPU_RAS_BLOCK__UMC = 0,
> @@ -333,6 +334,8 @@ struct amdgpu_ras {
>   struct mutex recovery_lock;
>  
>   uint32_t flags;
> +
> + struct amdgpu_ras_eeprom_control eeprom_control;
>  };
>  
>  struct ras_fs_data {
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c 
> b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
> new file mode 100644
> index 000..bf07515
> --- /dev/null
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
> @@ -0,0 +1,482 @@
> +/*
> + * Copyright 2019 Advanced Micro Devices, Inc.
> + *
> + * Permission is hereby granted, free of charge, to any person obtaining a
> + * copy of this software and associated documentation files (the "Software"),
> + * to deal in the Software without restriction, including without limitation
> + * the rights to use, copy, modify, merge, publish, distribute, sublicense,
> + * and/or sell copies of the Software, and to permit persons to whom the
> + * Software is furnished to do so, subject to the following conditions:
> + *
> + * The above copyright notice and this permission notice shall be included in
> + * all copies or substantial portions of the Software.
> + *
> + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
> + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
> + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
> + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
> + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
> + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
> + * OTHER DEALINGS IN THE SOFTWARE.
> + *
> + */
> +
> +#include "amdgpu_ras_eeprom.h"
> +#include "amdgpu.h"
> +#include "amdgpu_ras.h"
> +#include 
> +
> +#define EEPROM_I2C_TARGET_ADDR 0xA0
> +
> +#define EEPROM_TABLE_HEADER_SIZE 20
> +#define EEPROM_TABLE_RECORD_SIZE 24
> +#define EEPROM_ADDRESS_SIZE 0x2
> +
> +/* Table hdr is 'AMDR' */
> +#define EEPROM_TABLE_HDR_VAL 0x414d4452
> +#define EEPROM_TABLE_VER 0x0001
> +
> +/* Assume 2 Mbit size */
> +#define EEPROM_SIZE_BYTES 256000
> +#define EEPROM_PAGE__SIZE_BYTES 256
> +#define EEPROM_HDR_START 0
> +#define EEPROM_RECORD_START (EEPROM_HDR_START + EEPROM_TABLE_HEADER_SIZE)
> +#define EEPROM_MAX_RECORD_NUM ((EEPROM_SIZE_BYTES - 
> EEPROM_TABLE_HEADER_SIZE) / EEPROM_TABLE_RECORD_SIZE)
> +#define EEPROM_ADDR_MSB_MASK GENMASK(17, 8)
> +
> +#define to_amdgpu_device(x) (container_of(x, struct amdgpu_ras, 
> eeprom_control))->adev
> +
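
(For scale: with the sizes above, EEPROM_MAX_RECORD_NUM evaluates to
(256000 - 20) / 24 = 10665 records.)
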
> +static void __encode_table_header_to_buff(struct 
> amdgpu_ras_eeprom_table_header *hdr,
> +   unsigned char *buff)
> +{
> + uint32_t *pp = (uint32_t *) buff;
> +
> + pp[0] = cpu_to_le32(hdr->header);
> + pp[1] = cpu_to_le32(hdr->version);
> + pp[2] = cpu_to_le32(hdr->first_rec_offset);

Re: [PATCH v4 4/4] drm/amdgpu: Vega20 SMU I2C HW engine controller.

2019-08-22 Thread Tuikov, Luben
Hi Andrey,

I've the exact same comments about this patch as the ones
I posted on 2019-08-07.

Regards,
Luben

On 2019-08-21 16:01, Andrey Grodzovsky wrote:
> Implement HW I2C engine controller to be used by the RAS EEPROM
> table manager. This is based on code from ATITOOLs.
> 
> v2:
> Rename the file and all function prefixes to smu_v11_0_i2c
> 
> Per Luben's observation, always fill the TX fifo to full so
> we don't have garbage interpreted by the slave as valid data.
> 
> v3:
> Remove preemption disable as the HW I2C controller will not
> stop the clock on empty TX fifo and so it's not critical to
> keep not empty queue.
> Switch to fast mode 400 khz SCL clock for faster read and write.
> 
> Signed-off-by: Andrey Grodzovsky 
> ---
>  drivers/gpu/drm/amd/amdgpu/Makefile|   5 +-
>  drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c |   5 +-
>  drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.c | 710 
> +
>  drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.h |  34 ++
>  4 files changed, 751 insertions(+), 3 deletions(-)
>  create mode 100644 drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.c
>  create mode 100644 drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.h
> 
> diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile 
> b/drivers/gpu/drm/amd/amdgpu/Makefile
> index f016cf1..14733ff 100644
> --- a/drivers/gpu/drm/amd/amdgpu/Makefile
> +++ b/drivers/gpu/drm/amd/amdgpu/Makefile
> @@ -38,6 +38,9 @@ ccflags-y := -I$(FULL_AMD_PATH)/include/asic_reg \
>   -I$(FULL_AMD_DISPLAY_PATH)/amdgpu_dm \
>   -I$(FULL_AMD_PATH)/amdkfd
>  
> +
> +
> +
>  amdgpu-y := amdgpu_drv.o
>  
>  # add KMS driver
> @@ -54,7 +57,7 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \
>   amdgpu_gtt_mgr.o amdgpu_vram_mgr.o amdgpu_virt.o amdgpu_atomfirmware.o \
>   amdgpu_vf_error.o amdgpu_sched.o amdgpu_debugfs.o amdgpu_ids.o \
>   amdgpu_gmc.o amdgpu_xgmi.o amdgpu_csa.o amdgpu_ras.o amdgpu_vm_cpu.o \
> - amdgpu_vm_sdma.o amdgpu_pmu.o amdgpu_discovery.o amdgpu_ras_eeprom.o
> + amdgpu_vm_sdma.o amdgpu_pmu.o amdgpu_discovery.o amdgpu_ras_eeprom.o 
> smu_v11_0_i2c.o
>  
>  amdgpu-$(CONFIG_PERF_EVENTS) += amdgpu_pmu.o
>  
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c 
> b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
> index bf07515..e6b2e17 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
> @@ -25,6 +25,7 @@
>  #include "amdgpu.h"
>  #include "amdgpu_ras.h"
>  #include 
> +#include "smu_v11_0_i2c.h"
>  
>  #define EEPROM_I2C_TARGET_ADDR 0xA0
>  
> @@ -111,7 +112,7 @@ int amdgpu_ras_eeprom_init(struct 
> amdgpu_ras_eeprom_control *control)
>  
>   switch (adev->asic_type) {
>   case CHIP_VEGA20:
> - /*TODO Add MI-60 */
> + ret = 
> smu_v11_0_i2c_eeprom_control_init(&control->eeprom_accessor);
>   break;
>  
>   default:
> @@ -163,7 +164,7 @@ void amdgpu_ras_eeprom_fini(struct 
> amdgpu_ras_eeprom_control *control)
>  
>   switch (adev->asic_type) {
>   case CHIP_VEGA20:
> - /*TODO Add MI-60 */
> + smu_v11_0_i2c_eeprom_control_fini(&control->eeprom_accessor);
>   break;
>  
>   default:
> diff --git a/drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.c 
> b/drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.c
> new file mode 100644
> index 000..24405fa
> --- /dev/null
> +++ b/drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.c
> @@ -0,0 +1,710 @@
> +/*
> + * Copyright 2019 Advanced Micro Devices, Inc.
> + *
> + * Permission is hereby granted, free of charge, to any person obtaining a
> + * copy of this software and associated documentation files (the "Software"),
> + * to deal in the Software without restriction, including without limitation
> + * the rights to use, copy, modify, merge, publish, distribute, sublicense,
> + * and/or sell copies of the Software, and to permit persons to whom the
> + * Software is furnished to do so, subject to the following conditions:
> + *
> + * The above copyright notice and this permission notice shall be included in
> + * all copies or substantial portions of the Software.
> + *
> + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
> + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
> + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
> + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
> + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
> + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
> + * OTHER DEALINGS IN THE SOFTWARE.
> + *
> + */
> +
> +#include "smuio/smuio_11_0_0_offset.h"
> +#include "smuio/smuio_11_0_0_sh_mask.h"
> +
> +#include "smu_v11_0_i2c.h"
> +#include "amdgpu.h"
> +#include "soc15_common.h"
> +#include 
> +#include "amdgpu_amdkfd.h"
> +#include 
> +#include "amdgpu_ras.h"
> +
> +/* error codes */
> +#define I2C_OK   0
> +#define I2C_NAK

[PATCH] drm/amd/powerplay: update cached feature enablement status V3

2019-08-22 Thread Evan Quan
Need to update the cached feature enablement status after pp_feature
settings. Another fix for the commit below:
drm/amd/powerplay: implment sysfs feature status function in smu

V2: update smu_feature_update_enable_state() and relates
V3: use bitmap_or and bitmap_andnot

Change-Id: I90e29b0d839df26825d5993212f6097c7ad4bebf
Signed-off-by: Evan Quan 
---
 drivers/gpu/drm/amd/powerplay/amdgpu_smu.c| 101 +-
 .../gpu/drm/amd/powerplay/inc/amdgpu_smu.h|   1 -
 2 files changed, 49 insertions(+), 53 deletions(-)

diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c 
b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
index 4df7fb6eaf3c..c8c00966a621 100644
--- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
@@ -94,6 +94,52 @@ size_t smu_sys_get_pp_feature_mask(struct smu_context *smu, 
char *buf)
return size;
 }
 
+static int smu_feature_update_enable_state(struct smu_context *smu,
+  uint64_t feature_mask,
+  bool enabled)
+{
+   struct smu_feature *feature = &smu->smu_feature;
+   uint32_t feature_low = 0, feature_high = 0;
+   int ret = 0;
+
+   if (!smu->pm_enabled)
+   return ret;
+
+   feature_low = (feature_mask >> 0 ) & 0xffffffff;
+   feature_high = (feature_mask >> 32) & 0xffffffff;
+
+   if (enabled) {
+   ret = smu_send_smc_msg_with_param(smu, 
SMU_MSG_EnableSmuFeaturesLow,
+ feature_low);
+   if (ret)
+   return ret;
+   ret = smu_send_smc_msg_with_param(smu, 
SMU_MSG_EnableSmuFeaturesHigh,
+ feature_high);
+   if (ret)
+   return ret;
+   } else {
+   ret = smu_send_smc_msg_with_param(smu, 
SMU_MSG_DisableSmuFeaturesLow,
+ feature_low);
+   if (ret)
+   return ret;
+   ret = smu_send_smc_msg_with_param(smu, 
SMU_MSG_DisableSmuFeaturesHigh,
+ feature_high);
+   if (ret)
+   return ret;
+   }
+
+   mutex_lock(&feature->mutex);
+   if (enabled)
+   bitmap_or(feature->enabled, feature->enabled,
+   (unsigned long *)(&feature_mask), 
SMU_FEATURE_MAX);
+   else
+   bitmap_andnot(feature->enabled, feature->enabled,
+   (unsigned long *)(&feature_mask), 
SMU_FEATURE_MAX);
+   mutex_unlock(&feature->mutex);
+
+   return ret;
+}
+
 int smu_sys_set_pp_feature_mask(struct smu_context *smu, uint64_t new_mask)
 {
int ret = 0;
@@ -591,41 +637,7 @@ int smu_feature_init_dpm(struct smu_context *smu)
 
return ret;
 }
-int smu_feature_update_enable_state(struct smu_context *smu, uint64_t 
feature_mask, bool enabled)
-{
-   uint32_t feature_low = 0, feature_high = 0;
-   int ret = 0;
-
-   if (!smu->pm_enabled)
-   return ret;
-
-   feature_low = (feature_mask >> 0 ) & 0xffffffff;
-   feature_high = (feature_mask >> 32) & 0xffffffff;
-
-   if (enabled) {
-   ret = smu_send_smc_msg_with_param(smu, 
SMU_MSG_EnableSmuFeaturesLow,
- feature_low);
-   if (ret)
-   return ret;
-   ret = smu_send_smc_msg_with_param(smu, 
SMU_MSG_EnableSmuFeaturesHigh,
- feature_high);
-   if (ret)
-   return ret;
-
-   } else {
-   ret = smu_send_smc_msg_with_param(smu, 
SMU_MSG_DisableSmuFeaturesLow,
- feature_low);
-   if (ret)
-   return ret;
-   ret = smu_send_smc_msg_with_param(smu, 
SMU_MSG_DisableSmuFeaturesHigh,
- feature_high);
-   if (ret)
-   return ret;
 
-   }
-
-   return ret;
-}
 
 int smu_feature_is_enabled(struct smu_context *smu, enum smu_feature_mask mask)
 {
@@ -651,8 +663,6 @@ int smu_feature_set_enabled(struct smu_context *smu, enum 
smu_feature_mask mask,
 {
struct smu_feature *feature = &smu->smu_feature;
int feature_id;
-   uint64_t feature_mask = 0;
-   int ret = 0;
 
feature_id = smu_feature_get_index(smu, mask);
if (feature_id < 0)
@@ -660,22 +670,9 @@ int smu_feature_set_enabled(struct smu_context *smu, enum 
smu_feature_mask mask,
 
WARN_ON(feature_id > feature->feature_num);
 
-   feature_mask = 1ULL << feature_id;
-
-   mutex_lock(&feature->mutex);
-   ret = smu_feature_update_enable_state(smu, feature_mask, enable);
-   if (ret)
-   goto failed;
-
-   if

RE: [PATCH] drm/amd/powerplay: update cached feature enablement status V2

2019-08-22 Thread Quan, Evan
Comment inline

From: Wang, Kevin(Yang) 
Sent: Thursday, August 22, 2019 8:00 PM
To: Quan, Evan ; amd-gfx@lists.freedesktop.org
Subject: Re: [PATCH] drm/amd/powerplay: update cached feature enablement status 
V2

comment inline.

From: amd-gfx  on behalf of Evan Quan 
Sent: Thursday, August 22, 2019 6:18 PM
To: amd-gfx@lists.freedesktop.org
Cc: Quan, Evan 
Subject: [PATCH] drm/amd/powerplay: update cached feature enablement status V2

Need to update in cache feature enablement status after pp_feature
settings. Another fix for the commit below:
drm/amd/powerplay: implment sysfs feature status function in smu

V2: update smu_feature_update_enable_state() and relates

Change-Id: I90e29b0d839df26825d5993212f6097c7ad4bebf
[kevin]: this information is not necessary for public, please remove it.
git config gerrit.createchangeid=false
Signed-off-by: Evan Quan 
---
 drivers/gpu/drm/amd/powerplay/amdgpu_smu.c| 104 +-
 .../gpu/drm/amd/powerplay/inc/amdgpu_smu.h|   1 -
 2 files changed, 52 insertions(+), 53 deletions(-)

diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c 
b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
index 4df7fb6eaf3c..3e1cd5d9c29e 100644
--- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
@@ -94,6 +94,55 @@ size_t smu_sys_get_pp_feature_mask(struct smu_context *smu, 
char *buf)
 return size;
 }

+static int smu_feature_update_enable_state(struct smu_context *smu,
+  uint64_t feature_mask,
+  bool enabled)
+{
+   struct smu_feature *feature = &smu->smu_feature;
+   uint32_t feature_low = 0, feature_high = 0;
+   uint64_t feature_id;
+   int ret = 0;
+
+   if (!smu->pm_enabled)
+   return ret;
+
+   feature_low = (feature_mask >> 0 ) & 0xffffffff;
+   feature_high = (feature_mask >> 32) & 0xffffffff;
+
+   if (enabled) {
+   ret = smu_send_smc_msg_with_param(smu, 
SMU_MSG_EnableSmuFeaturesLow,
+ feature_low);
+   if (ret)
+   return ret;
+   ret = smu_send_smc_msg_with_param(smu, 
SMU_MSG_EnableSmuFeaturesHigh,
+ feature_high);
+   if (ret)
+   return ret;
+   } else {
+   ret = smu_send_smc_msg_with_param(smu, 
SMU_MSG_DisableSmuFeaturesLow,
+ feature_low);
+   if (ret)
+   return ret;
+   ret = smu_send_smc_msg_with_param(smu, 
SMU_MSG_DisableSmuFeaturesHigh,
+ feature_high);
+   if (ret)
+   return ret;
+   }
+
+   mutex_lock(&feature->mutex);
+   for (feature_id = 0; feature_id < 64; feature_id++) {
+   if (feature_mask & (1ULL << feature_id)) {
+   if (enabled)
+   test_and_set_bit(feature_id, feature->enabled);
+   else
+   test_and_clear_bit(feature_id, 
feature->enabled);
+   }
+   }

//[kevin]: the code logic is a little redundant.
could you use the below macros to replace that?
header : linux/bitmap.h
 *  bitmap_and(dst, src1, src2, nbits)  *dst = *src1 & *src2
 *  bitmap_or(dst, src1, src2, nbits)   *dst = *src1 | *src2
 *  bitmap_xor(dst, src1, src2, nbits)  *dst = *src1 ^ *src2
 *  bitmap_andnot(dst, src1, src2, nbits)   *dst = *src1 & ~(*src2)

+   mutex_unlock(&feature->mutex);
+
+   return ret;
+}
+

[Quan, Evan] updated in v3.
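
A sketch of roughly what that replacement looks like (bitmap_from_u64() is
used here only to avoid casting &feature_mask and is not part of either patch
version):

    unsigned long mask[BITS_TO_LONGS(SMU_FEATURE_MAX)] = { 0 };

    bitmap_from_u64(mask, feature_mask);    /* 64-bit mask -> bitmap */

    mutex_lock(&feature->mutex);
    if (enabled)
        bitmap_or(feature->enabled, feature->enabled, mask, SMU_FEATURE_MAX);
    else
        bitmap_andnot(feature->enabled, feature->enabled, mask, SMU_FEATURE_MAX);
    mutex_unlock(&feature->mutex);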

 int smu_sys_set_pp_feature_mask(struct smu_context *smu, uint64_t new_mask)
 {
 int ret = 0;
@@ -591,41 +640,7 @@ int smu_feature_init_dpm(struct smu_context *smu)

 return ret;
 }

[kevin]:
in this patch, I know you only want to fix the issue of the feature cache not
being updated, but the v2 patch also changes the order of functions in the
file, so the diff makes this look like a brand new function. I don't think
that is necessary; could you keep the patch limited to the actual changes,
which makes it easier for us to trace problems and review?
thanks.

[Quan, Evan] Move the API before the place it's called. No problem here.
-int smu_feature_update_enable_state(struct smu_context *smu, uint64_t 
feature_mask, bool enabled)
-{
-   uint32_t feature_low = 0, feature_high = 0;
-   int ret = 0;

-   if (!smu->pm_enabled)
-   return ret;
-
-   feature_low = (feature_mask >> 0 ) & 0xffffffff;
-   feature_high = (feature_mask >> 32) & 0xffffffff;
-
-   if (enabled) {
-   re

Re: [PATCH] drm/amd/powerplay: update cached feature enablement status V2

2019-08-22 Thread Wang, Kevin(Yang)
comment inline

From: Quan, Evan 
Sent: Friday, August 23, 2019 12:50 PM
To: Wang, Kevin(Yang) ; amd-gfx@lists.freedesktop.org 

Subject: RE: [PATCH] drm/amd/powerplay: update cached feature enablement status 
V2


Comment inline



From: Wang, Kevin(Yang) 
Sent: Thursday, August 22, 2019 8:00 PM
To: Quan, Evan ; amd-gfx@lists.freedesktop.org
Subject: Re: [PATCH] drm/amd/powerplay: update cached feature enablement status 
V2



comment inline.



From: amd-gfx  on behalf of Evan Quan 
Sent: Thursday, August 22, 2019 6:18 PM
To: amd-gfx@lists.freedesktop.org
Cc: Quan, Evan 
Subject: [PATCH] drm/amd/powerplay: update cached feature enablement status V2



Need to update in cache feature enablement status after pp_feature
settings. Another fix for the commit below:
drm/amd/powerplay: implment sysfs feature status function in smu

V2: update smu_feature_update_enable_state() and relates

Change-Id: I90e29b0d839df26825d5993212f6097c7ad4bebf

[kevin]: this information is not necessary for public, please remove it.

git config gerrit.createchangeid=false
Signed-off-by: Evan Quan 
---
 drivers/gpu/drm/amd/powerplay/amdgpu_smu.c| 104 +-
 .../gpu/drm/amd/powerplay/inc/amdgpu_smu.h|   1 -
 2 files changed, 52 insertions(+), 53 deletions(-)

diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c 
b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
index 4df7fb6eaf3c..3e1cd5d9c29e 100644
--- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
@@ -94,6 +94,55 @@ size_t smu_sys_get_pp_feature_mask(struct smu_context *smu, 
char *buf)
 return size;
 }

+static int smu_feature_update_enable_state(struct smu_context *smu,
+  uint64_t feature_mask,
+  bool enabled)
+{
+   struct smu_feature *feature = &smu->smu_feature;
+   uint32_t feature_low = 0, feature_high = 0;
+   uint64_t feature_id;
+   int ret = 0;
+
+   if (!smu->pm_enabled)
+   return ret;
+
+   feature_low = (feature_mask >> 0 ) & 0xffffffff;
+   feature_high = (feature_mask >> 32) & 0xffffffff;
+
+   if (enabled) {
+   ret = smu_send_smc_msg_with_param(smu, 
SMU_MSG_EnableSmuFeaturesLow,
+ feature_low);
+   if (ret)
+   return ret;
+   ret = smu_send_smc_msg_with_param(smu, 
SMU_MSG_EnableSmuFeaturesHigh,
+ feature_high);
+   if (ret)
+   return ret;
+   } else {
+   ret = smu_send_smc_msg_with_param(smu, 
SMU_MSG_DisableSmuFeaturesLow,
+ feature_low);
+   if (ret)
+   return ret;
+   ret = smu_send_smc_msg_with_param(smu, 
SMU_MSG_DisableSmuFeaturesHigh,
+ feature_high);
+   if (ret)
+   return ret;
+   }
+
+   mutex_lock(&feature->mutex);
+   for (feature_id = 0; feature_id < 64; feature_id++) {
+   if (feature_mask & (1ULL << feature_id)) {
+   if (enabled)
+   test_and_set_bit(feature_id, feature->enabled);
+   else
+   test_and_clear_bit(feature_id, 
feature->enabled);
+   }
+   }



//[kevin]: the code logic is a little redundant.

could you use the below macros to replace that?

header : linux/bitmap.h

 *  bitmap_and(dst, src1, src2, nbits)  *dst = *src1 & *src2

 *  bitmap_or(dst, src1, src2, nbits)   *dst = *src1 | *src2

 *  bitmap_xor(dst, src1, src2, nbits)  *dst = *src1 ^ *src2

 *  bitmap_andnot(dst, src1, src2, nbits)   *dst = *src1 & ~(*src2)



+   mutex_unlock(&feature->mutex);
+
+   return ret;
+}
+



[Quan, Evan] updated in v3.

 int smu_sys_set_pp_feature_mask(struct smu_context *smu, uint64_t new_mask)
 {
 int ret = 0;
@@ -591,41 +640,7 @@ int smu_feature_init_dpm(struct smu_context *smu)

 return ret;
 }



[kevin]:

in this patch, i know you only want to fix not cached feature cache issue,

but in v2 patch,

the patch adjust the order of code functions, it seems that this is a brand new 
function,

I don't think it is necessary,

could you just reflect the modified content in the patch, which can facilitate 
us to trace problems and review.

thanks.



[Quan, Evan] Move the API before the place it’s called. No problem here.


[kevin]: in this patch, you don't need to adjust function order in this file,

because the driver is already expor

RE: [PATCH] drm/amd/powerplay: update cached feature enablement status V2

2019-08-22 Thread Quan, Evan
smu_feature_update_enable_state() is used only in amdgpu_smu.c.
As common practice, these APIs should be declared 'static'.

Regards,
Evan
From: Wang, Kevin(Yang) 
Sent: Friday, August 23, 2019 1:30 PM
To: Quan, Evan ; amd-gfx@lists.freedesktop.org
Subject: Re: [PATCH] drm/amd/powerplay: update cached feature enablement status 
V2

comment inline

From: Quan, Evan 
Sent: Friday, August 23, 2019 12:50 PM
To: Wang, Kevin(Yang) ; amd-gfx@lists.freedesktop.org
Subject: RE: [PATCH] drm/amd/powerplay: update cached feature enablement status 
V2


Comment inline



From: Wang, Kevin(Yang) 
Sent: Thursday, August 22, 2019 8:00 PM
To: Quan, Evan ; amd-gfx@lists.freedesktop.org
Subject: Re: [PATCH] drm/amd/powerplay: update cached feature enablement status 
V2



comment inline.



From: amd-gfx  on behalf of Evan Quan 
Sent: Thursday, August 22, 2019 6:18 PM
To: amd-gfx@lists.freedesktop.org
Cc: Quan, Evan 
Subject: [PATCH] drm/amd/powerplay: update cached feature enablement status V2



Need to update in cache feature enablement status after pp_feature
settings. Another fix for the commit below:
drm/amd/powerplay: implment sysfs feature status function in smu

V2: update smu_feature_update_enable_state() and relates

Change-Id: I90e29b0d839df26825d5993212f6097c7ad4bebf

[kevin]: this information is not necessary for public, please remove it.

git config gerrit.createchangeid=false
Signed-off-by: Evan Quan 
---
 drivers/gpu/drm/amd/powerplay/amdgpu_smu.c| 104 +-
 .../gpu/drm/amd/powerplay/inc/amdgpu_smu.h|   1 -
 2 files changed, 52 insertions(+), 53 deletions(-)

diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c 
b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
index 4df7fb6eaf3c..3e1cd5d9c29e 100644
--- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
@@ -94,6 +94,55 @@ size_t smu_sys_get_pp_feature_mask(struct smu_context *smu, 
char *buf)
 return size;
 }

+static int smu_feature_update_enable_state(struct smu_context *smu,
+  uint64_t feature_mask,
+  bool enabled)
+{
+   struct smu_feature *feature = &smu->smu_feature;
+   uint32_t feature_low = 0, feature_high = 0;
+   uint64_t feature_id;
+   int ret = 0;
+
+   if (!smu->pm_enabled)
+   return ret;
+
+   feature_low = (feature_mask >> 0 ) & 0xffffffff;
+   feature_high = (feature_mask >> 32) & 0xffffffff;
+
+   if (enabled) {
+   ret = smu_send_smc_msg_with_param(smu, 
SMU_MSG_EnableSmuFeaturesLow,
+ feature_low);
+   if (ret)
+   return ret;
+   ret = smu_send_smc_msg_with_param(smu, 
SMU_MSG_EnableSmuFeaturesHigh,
+ feature_high);
+   if (ret)
+   return ret;
+   } else {
+   ret = smu_send_smc_msg_with_param(smu, 
SMU_MSG_DisableSmuFeaturesLow,
+ feature_low);
+   if (ret)
+   return ret;
+   ret = smu_send_smc_msg_with_param(smu, 
SMU_MSG_DisableSmuFeaturesHigh,
+ feature_high);
+   if (ret)
+   return ret;
+   }
+
+   mutex_lock(&feature->mutex);
+   for (feature_id = 0; feature_id < 64; feature_id++) {
+   if (feature_mask & (1ULL << feature_id)) {
+   if (enabled)
+   test_and_set_bit(feature_id, feature->enabled);
+   else
+   test_and_clear_bit(feature_id, 
feature->enabled);
+   }
+   }



//[kevin]: the code logic is a little redundant.

could you use the below macros to replace that?

header : linux/bitmap.h

 *  bitmap_and(dst, src1, src2, nbits)  *dst = *src1 & *src2

 *  bitmap_or(dst, src1, src2, nbits)   *dst = *src1 | *src2

 *  bitmap_xor(dst, src1, src2, nbits)  *dst = *src1 ^ *src2

 *  bitmap_andnot(dst, src1, src2, nbits)   *dst = *src1 & ~(*src2)



+   mutex_unlock(&feature->mutex);
+
+   return ret;
+}
+



[Quan, Evan] updated in v3.

 int smu_sys_set_pp_feature_mask(struct smu_context *smu, uint64_t new_mask)
 {
 int ret = 0;
@@ -591,41 +640,7 @@ int smu_feature_init_dpm(struct smu_context *smu)

 

Re: [PATCH] drm/amd/powerplay: update cached feature enablement status V2

2019-08-22 Thread Wang, Kevin(Yang)
a patch should do only one thing.
this patch does 2 things:

  1.  fix the feature bitmap caching issue.
  2.  make the smu_feature_update_enable_state API a static function.

for reason #2:
the driver has other APIs which are only used in amdgpu_smu.c but are still
declared in amdgpu_smu.h; the intent is to make them public APIs.
if you want to make this one a static function, please split it into 2 patches
and state the reason.

Best Regards,
Kevin

From: Quan, Evan 
Sent: Friday, August 23, 2019 1:38 PM
To: Wang, Kevin(Yang) ; amd-gfx@lists.freedesktop.org 

Subject: RE: [PATCH] drm/amd/powerplay: update cached feature enablement status 
V2


smu_feature_update_enable_state() is used only in amdgpu_smu.c.

As a common sense, these APIs should be declared as ‘static’.



Regards,

Evan

From: Wang, Kevin(Yang) 
Sent: Friday, August 23, 2019 1:30 PM
To: Quan, Evan ; amd-gfx@lists.freedesktop.org
Subject: Re: [PATCH] drm/amd/powerplay: update cached feature enablement status 
V2



comment inline



From: Quan, Evan 
Sent: Friday, August 23, 2019 12:50 PM
To: Wang, Kevin(Yang) ; amd-gfx@lists.freedesktop.org
Subject: RE: [PATCH] drm/amd/powerplay: update cached feature enablement status 
V2



Comment inline



From: Wang, Kevin(Yang) 
Sent: Thursday, August 22, 2019 8:00 PM
To: Quan, Evan ; amd-gfx@lists.freedesktop.org
Subject: Re: [PATCH] drm/amd/powerplay: update cached feature enablement status 
V2



comment inline.



From: amd-gfx  on behalf of Evan Quan 
Sent: Thursday, August 22, 2019 6:18 PM
To: amd-gfx@lists.freedesktop.org
Cc: Quan, Evan 
Subject: [PATCH] drm/amd/powerplay: update cached feature enablement status V2



Need to update in cache feature enablement status after pp_feature
settings. Another fix for the commit below:
drm/amd/powerplay: implment sysfs feature status function in smu

V2: update smu_feature_update_enable_state() and relates

Change-Id: I90e29b0d839df26825d5993212f6097c7ad4bebf

[kevin]: this information is not necessary for public, please remove it.

git config gerrit.createchangeid=false
Signed-off-by: Evan Quan 
---
 drivers/gpu/drm/amd/powerplay/amdgpu_smu.c| 104 +-
 .../gpu/drm/amd/powerplay/inc/amdgpu_smu.h|   1 -
 2 files changed, 52 insertions(+), 53 deletions(-)

diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c 
b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
index 4df7fb6eaf3c..3e1cd5d9c29e 100644
--- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
@@ -94,6 +94,55 @@ size_t smu_sys_get_pp_feature_mask(struct smu_context *smu, 
char *buf)
 return size;
 }

+static int smu_feature_update_enable_state(struct smu_context *smu,
+  uint64_t feature_mask,
+  bool enabled)
+{
+   struct smu_feature *feature = &smu->smu_feature;
+   uint32_t feature_low = 0, feature_high = 0;
+   uint64_t feature_id;
+   int ret = 0;
+
+   if (!smu->pm_enabled)
+   return ret;
+
+   feature_low = (feature_mask >> 0 ) & 0xffffffff;
+   feature_high = (feature_mask >> 32) & 0xffffffff;
+
+   if (enabled) {
+   ret = smu_send_smc_msg_with_param(smu, 
SMU_MSG_EnableSmuFeaturesLow,
+ feature_low);
+   if (ret)
+   return ret;
+   ret = smu_send_smc_msg_with_param(smu, 
SMU_MSG_EnableSmuFeaturesHigh,
+ feature_high);
+   if (ret)
+   return ret;
+   } else {
+   ret = smu_send_smc_msg_with_param(smu, 
SMU_MSG_DisableSmuFeaturesLow,
+ feature_low);
+   if (ret)
+   return ret;
+   ret = smu_send_smc_msg_with_param(smu, 
SMU_MSG_DisableSmuFeaturesHigh,
+ feature_high);
+   if (ret)
+   return ret;
+   }
+
+   mutex_lock(&feature->mutex);
+   for (feature_id = 0; feature_id < 64; feature_id++) {
+   if (feature_mask & (1ULL << feature_id)) {
+   if (enabled)
+   test_and_set_bit(feature_id, feature->enabled);
+   else
+   test_and_clear_bit(feature_id, 
feature->enabled);
+   }
+