[Part II PATCH 01/15] drm/amd/powerplay: add SMU71 header files for iceland (v2)

2016-07-28 Thread Huang Rui
v2: cleanup headers, add copyright

Signed-off-by: Huang Rui 
---
 drivers/gpu/drm/amd/powerplay/inc/smu71.h  | 510 +
 drivers/gpu/drm/amd/powerplay/inc/smu71_discrete.h | 631 +
 2 files changed, 1141 insertions(+)
 create mode 100644 drivers/gpu/drm/amd/powerplay/inc/smu71.h
 create mode 100644 drivers/gpu/drm/amd/powerplay/inc/smu71_discrete.h

diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu71.h 
b/drivers/gpu/drm/amd/powerplay/inc/smu71.h
new file mode 100644
index 000..71c9b2d
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu71.h
@@ -0,0 +1,510 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef SMU71_H
+#define SMU71_H
+
+#if !defined(SMC_MICROCODE)
+#pragma pack(push, 1)
+#endif
+
+#define SMU__NUM_PCIE_DPM_LEVELS 8
+#define SMU__NUM_SCLK_DPM_STATE 8
+#define SMU__NUM_MCLK_DPM_LEVELS 4
+#define SMU__VARIANT__ICELAND 1
+#define SMU__DGPU_ONLY 1
+#define SMU__DYNAMIC_MCARB_SETTINGS 1
+
+enum SID_OPTION {
+  SID_OPTION_HI,
+  SID_OPTION_LO,
+  SID_OPTION_COUNT
+};
+
+typedef struct {
+  uint32_t high;
+  uint32_t low;
+} data_64_t;
+
+typedef struct {
+  data_64_t high;
+  data_64_t low;
+} data_128_t;
+
+#define SMU7_CONTEXT_ID_SMC1
+#define SMU7_CONTEXT_ID_VBIOS  2
+
+#define SMU71_MAX_LEVELS_VDDC8
+#define SMU71_MAX_LEVELS_VDDCI   4
+#define SMU71_MAX_LEVELS_MVDD4
+#define SMU71_MAX_LEVELS_VDDNB   8
+
+#define SMU71_MAX_LEVELS_GRAPHICSSMU__NUM_SCLK_DPM_STATE
+#define SMU71_MAX_LEVELS_MEMORY  SMU__NUM_MCLK_DPM_LEVELS
+#define SMU71_MAX_LEVELS_GIO SMU__NUM_LCLK_DPM_LEVELS
+#define SMU71_MAX_LEVELS_LINKSMU__NUM_PCIE_DPM_LEVELS
+#define SMU71_MAX_ENTRIES_SMIO   32
+
+#define DPM_NO_LIMIT 0
+#define DPM_NO_UP 1
+#define DPM_GO_DOWN 2
+#define DPM_GO_UP 3
+
+#define SMU7_FIRST_DPM_GRAPHICS_LEVEL0
+#define SMU7_FIRST_DPM_MEMORY_LEVEL  0
+
+#define GPIO_CLAMP_MODE_VRHOT  1
+#define GPIO_CLAMP_MODE_THERM  2
+#define GPIO_CLAMP_MODE_DC 4
+
+#define SCRATCH_B_TARG_PCIE_INDEX_SHIFT 0
+#define SCRATCH_B_TARG_PCIE_INDEX_MASK  (0x7

[Part II PATCH 05/15] drm/amdgpu: make amdgpu_cgs_call_acpi_method as static

2016-07-28 Thread Huang Rui
Signed-off-by: Huang Rui 
Reviewed-by: Alex Deucher 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
index ee95e95..fc22d39 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
@@ -1078,7 +1078,7 @@ static int amdgpu_cgs_acpi_eval_object(struct cgs_device 
*cgs_device,
 }
 #endif
 
-int amdgpu_cgs_call_acpi_method(struct cgs_device *cgs_device,
+static int amdgpu_cgs_call_acpi_method(struct cgs_device *cgs_device,
uint32_t acpi_method,
uint32_t acpi_function,
void *pinput, void *poutput,
-- 
2.7.4

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[Part II PATCH 06/15] drm/amdgpu: fix incorrect type of info_id

2016-07-28 Thread Huang Rui
Signed-off-by: Huang Rui 
Reviewed-by: Alex Deucher 
---
 drivers/gpu/drm/amd/include/cgs_common.h | 10 +-
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/drivers/gpu/drm/amd/include/cgs_common.h 
b/drivers/gpu/drm/amd/include/cgs_common.h
index f32af2f..ed5fa33 100644
--- a/drivers/gpu/drm/amd/include/cgs_common.h
+++ b/drivers/gpu/drm/amd/include/cgs_common.h
@@ -121,13 +121,13 @@ enum cgs_system_info_id {
 };
 
 struct cgs_system_info {
-   uint64_t   size;
-   uint64_t   info_id;
+   uint64_tsize;
+   enum cgs_system_info_id info_id;
union {
-   void   *ptr;
-   uint64_tvalue;
+   void*ptr;
+   uint64_tvalue;
};
-   uint64_t   padding[13];
+   uint64_tpadding[13];
 };
 
 /*
-- 
2.7.4

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[Part II PATCH 15/15] drm/amd/powerplay: add DPM running checking back

2016-07-28 Thread Huang Rui
This patch adds DPM running checking back, because the DPM issue is
fixed.

Signed-off-by: Huang Rui 
Reviewed-by: Ken Wang 
---
 drivers/gpu/drm/amd/powerplay/hwmgr/iceland_hwmgr.c | 2 --
 1 file changed, 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_hwmgr.c 
b/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_hwmgr.c
index d538d28..d8ca59b 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_hwmgr.c
@@ -4239,11 +4239,9 @@ int iceland_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
 {
iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
 
-#if 0
PP_ASSERT_WITH_CODE (0 == iceland_is_dpm_running(hwmgr),
"Trying to Unforce DPM when DPM is disabled. Returning without 
sending SMC message.",
return -1);
-#endif
 
if (0 == data->sclk_dpm_key_disabled) {
PP_ASSERT_WITH_CODE((0 == smum_send_msg_to_smc(
-- 
2.7.4

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[Part II PATCH 11/15] drm/amd/powerplay: fix the incorrect checking condition

2016-07-28 Thread Huang Rui
Signed-off-by: Huang Rui 
Reviewed-by: Ken Wang 
---
 drivers/gpu/drm/amd/powerplay/hwmgr/iceland_hwmgr.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_hwmgr.c 
b/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_hwmgr.c
index 9c6d7e3..7ffbbef 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_hwmgr.c
@@ -4170,7 +4170,7 @@ int iceland_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
return -1);
}
 
-   if (0 == data->pcie_dpm_key_disabled) {
+   if (0 == data->mclk_dpm_key_disabled) {
PP_ASSERT_WITH_CODE((0 == smum_send_msg_to_smc(
 hwmgr->smumgr,
PPSMC_MSG_MCLKDPM_NoForcedLevel)),
-- 
2.7.4

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[Part II PATCH 12/15] drm/amd/powerplay: add deep sleep initialization

2016-07-28 Thread Huang Rui
This patch adds the deep sleep initialization at DPM; it needs to send a
message to the SMC to enable this feature before enabling the voltage controller.

Signed-off-by: Huang Rui 
Reviewed-by: Ken Wang 
---
 .../gpu/drm/amd/powerplay/hwmgr/iceland_hwmgr.c| 34 ++
 1 file changed, 34 insertions(+)

diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_hwmgr.c 
b/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_hwmgr.c
index 7ffbbef..7a9749f 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_hwmgr.c
@@ -3069,6 +3069,36 @@ static int iceland_tf_start_smc(struct pp_hwmgr *hwmgr)
return ret;
 }
 
+/**
+* Programs the Deep Sleep registers
+*
+* @parampHwMgr  the address of the powerplay hardware manager.
+* @parampInput the pointer to input data 
(PhwEvergreen_DisplayConfiguration)
+* @parampOutput the pointer to output data (unused)
+* @parampStorage the pointer to temporary storage (unused)
+* @paramResult the last failure code (unused)
+* @return   always 0
+*/
+static int iceland_enable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
+{
+   if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+   PHM_PlatformCaps_SclkDeepSleep)) {
+   if (smum_send_msg_to_smc(hwmgr->smumgr,
+PPSMC_MSG_MASTER_DeepSleep_ON) != 0)
+   PP_ASSERT_WITH_CODE(false,
+   "Attempt to enable Master Deep 
Sleep switch failed!",
+   return -EINVAL);
+   } else {
+   if (smum_send_msg_to_smc(hwmgr->smumgr,
+PPSMC_MSG_MASTER_DeepSleep_OFF) != 0)
+   PP_ASSERT_WITH_CODE(false,
+   "Attempt to disable Master Deep 
Sleep switch failed!",
+   return -EINVAL);
+   }
+
+   return 0;
+}
+
 static int iceland_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
 {
int tmp_result, result = 0;
@@ -3133,6 +3163,10 @@ static int iceland_enable_dpm_tasks(struct pp_hwmgr 
*hwmgr)
PP_ASSERT_WITH_CODE((0 == tmp_result),
"Failed to enable SCLK control!", return tmp_result);
 
+   tmp_result = iceland_enable_deep_sleep_master_switch(hwmgr);
+   PP_ASSERT_WITH_CODE((tmp_result == 0),
+   "Failed to enable deep sleep!", return tmp_result);
+
/* enable DPM */
tmp_result = iceland_start_dpm(hwmgr);
PP_ASSERT_WITH_CODE((0 == tmp_result),
-- 
2.7.4

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[Part II PATCH 13/15] drm/amd/powerplay: set the platform capability flags for iceland

2016-07-28 Thread Huang Rui
Signed-off-by: Huang Rui 
Reviewed-by: Ken Wang 
---
 .../gpu/drm/amd/powerplay/hwmgr/iceland_hwmgr.c| 51 ++
 1 file changed, 51 insertions(+)

diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_hwmgr.c 
b/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_hwmgr.c
index 7a9749f..6075050 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_hwmgr.c
@@ -3801,6 +3801,57 @@ static int iceland_hwmgr_backend_init(struct pp_hwmgr 
*hwmgr)
stay_in_boot = phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
   PHM_PlatformCaps_StayInBootState);
 
+   phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+   PHM_PlatformCaps_DynamicPowerManagement);
+
+   phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+   PHM_PlatformCaps_ActivityReporting);
+
+   phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+   PHM_PlatformCaps_GFXClockGatingSupport);
+
+   phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+   PHM_PlatformCaps_MemorySpreadSpectrumSupport);
+   phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+   PHM_PlatformCaps_EngineSpreadSpectrumSupport);
+
+   phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+   PHM_PlatformCaps_DynamicPCIEGen2Support);
+   phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+   PHM_PlatformCaps_SMC);
+
+   phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+   PHM_PlatformCaps_DisablePowerGating);
+   phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+   PHM_PlatformCaps_BACO);
+
+   phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+   PHM_PlatformCaps_ThermalAutoThrottling);
+   phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+   PHM_PlatformCaps_DisableLSClockGating);
+   phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+   PHM_PlatformCaps_SamuDPM);
+   phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+   PHM_PlatformCaps_AcpDPM);
+   phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+   PHM_PlatformCaps_OD6inACSupport);
+   phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+   PHM_PlatformCaps_EnablePlatformPowerManagement);
+
+   phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+   PHM_PlatformCaps_PauseMMSessions);
+
+   phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+   PHM_PlatformCaps_OD6PlusinACSupport);
+   phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+   PHM_PlatformCaps_PauseMMSessions);
+   phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+   PHM_PlatformCaps_GFXClockGatingManagedInCAIL);
+   phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+   PHM_PlatformCaps_IcelandULPSSWWorkAround);
+   phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+   PHM_PlatformCaps_PowerContainment);
+
/* iceland doesn't support UVD and VCE */
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
  PHM_PlatformCaps_UVDPowerGating);
-- 
2.7.4

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[Part II PATCH 14/15] drm/amd/powerplay: add enabling voltage controller back

2016-07-28 Thread Huang Rui
Signed-off-by: Huang Rui 
Reviewed-by: Ken Wang 
---
 drivers/gpu/drm/amd/powerplay/hwmgr/iceland_hwmgr.c | 2 --
 1 file changed, 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_hwmgr.c 
b/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_hwmgr.c
index 6075050..d538d28 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_hwmgr.c
@@ -2970,13 +2970,11 @@ int iceland_start_dpm(struct pp_hwmgr *hwmgr)
 
PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__PCIE, 
SWRST_COMMAND_1, RESETLC, 0x0);
 
-#if 0
PP_ASSERT_WITH_CODE(
(0 == smum_send_msg_to_smc(hwmgr->smumgr,
PPSMC_MSG_Voltage_Cntl_Enable)),
"Failed to enable voltage DPM during DPM Start 
Function!",
return -1);
-#endif
 
if (0 != iceland_enable_sclk_mclk_dpm(hwmgr)) {
PP_ASSERT_WITH_CODE(0, "Failed to enable Sclk DPM and Mclk 
DPM!", return -1);
-- 
2.7.4

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[Part II PATCH 04/15] drm/amd/powerplay: add iceland_dyn_defaults header

2016-07-28 Thread Huang Rui
Signed-off-by: Huang Rui 
Reviewed-by: Alex Deucher 
---
 .../drm/amd/powerplay/hwmgr/iceland_dyn_defaults.h | 41 ++
 1 file changed, 41 insertions(+)
 create mode 100644 drivers/gpu/drm/amd/powerplay/hwmgr/iceland_dyn_defaults.h

diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_dyn_defaults.h 
b/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_dyn_defaults.h
new file mode 100644
index 000..a7b4bc6
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_dyn_defaults.h
@@ -0,0 +1,41 @@
+#ifndef ICELAND_DYN_DEFAULTS_H
+#define ICELAND_DYN_DEFAULTS_H
+
+enum ICELANDdpm_TrendDetection
+{
+   ICELANDdpm_TrendDetection_AUTO,
+   ICELANDdpm_TrendDetection_UP,
+   ICELANDdpm_TrendDetection_DOWN
+};
+typedef enum ICELANDdpm_TrendDetection ICELANDdpm_TrendDetection;
+
+
+#define PPICELAND_VOTINGRIGHTSCLIENTS_DFLT0 0x3FFFC102
+#define PPICELAND_VOTINGRIGHTSCLIENTS_DFLT1 0x000400
+#define PPICELAND_VOTINGRIGHTSCLIENTS_DFLT2 0xC00080
+#define PPICELAND_VOTINGRIGHTSCLIENTS_DFLT3 0xC00200
+#define PPICELAND_VOTINGRIGHTSCLIENTS_DFLT4 0xC01680
+#define PPICELAND_VOTINGRIGHTSCLIENTS_DFLT5 0xC00033
+#define PPICELAND_VOTINGRIGHTSCLIENTS_DFLT6 0xC00033
+#define PPICELAND_VOTINGRIGHTSCLIENTS_DFLT7 0x3FFFC000
+
+
+#define PPICELAND_THERMALPROTECTCOUNTER_DFLT0x200
+
+#define PPICELAND_STATICSCREENTHRESHOLDUNIT_DFLT0
+
+#define PPICELAND_STATICSCREENTHRESHOLD_DFLT0x00C8
+
+#define PPICELAND_GFXIDLECLOCKSTOPTHRESHOLD_DFLT0x200
+
+#define PPICELAND_REFERENCEDIVIDER_DFLT 4
+
+#define PPICELAND_ULVVOLTAGECHANGEDELAY_DFLT1687
+
+#define PPICELAND_CGULVPARAMETER_DFLT   0x00040035
+#define PPICELAND_CGULVCONTROL_DFLT 0x7450
+#define PPICELAND_TARGETACTIVITY_DFLT   30
+#define PPICELAND_MCLK_TARGETACTIVITY_DFLT  10
+
+#endif
+
-- 
2.7.4

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[Part II PATCH 02/15] drm/amd/powerplay: add iceland SMU manager

2016-07-28 Thread Huang Rui
The system management unit (SMU) is a subcomponent of the northbridge
that is responsible for a variety of system and power management tasks
during boot and runtime for the GPU. In powerplay, it will be used for
firmware loading and power task management. This patch adds the SMU
manager for iceland.

Signed-off-by: Huang Rui 
Reviewed-by: Alex Deucher 
---
 drivers/gpu/drm/amd/powerplay/smumgr/Makefile  |   3 +-
 .../gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c  | 713 +
 .../gpu/drm/amd/powerplay/smumgr/iceland_smumgr.h  |  64 ++
 drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c  |   4 +
 4 files changed, 783 insertions(+), 1 deletion(-)
 create mode 100644 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c
 create mode 100644 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.h

diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/Makefile 
b/drivers/gpu/drm/amd/powerplay/smumgr/Makefile
index f10fb64..19e7946 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/Makefile
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/Makefile
@@ -2,7 +2,8 @@
 # Makefile for the 'smu manager' sub-component of powerplay.
 # It provides the smu management services for the driver.
 
-SMU_MGR = smumgr.o cz_smumgr.o tonga_smumgr.o fiji_smumgr.o polaris10_smumgr.o
+SMU_MGR = smumgr.o cz_smumgr.o tonga_smumgr.o fiji_smumgr.o \
+ polaris10_smumgr.o iceland_smumgr.o
 
 AMD_PP_SMUMGR = $(addprefix $(AMD_PP_PATH)/smumgr/,$(SMU_MGR))
 
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c 
b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c
new file mode 100644
index 000..f506583
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c
@@ -0,0 +1,713 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Author: Huang Rui 
+ *
+ */
+#include 
+#include 
+#include 
+#include 
+
+#include "smumgr.h"
+#include "iceland_smumgr.h"
+#include "pp_debug.h"
+#include "smu_ucode_xfer_vi.h"
+#include "ppsmc.h"
+#include "smu/smu_7_1_1_d.h"
+#include "smu/smu_7_1_1_sh_mask.h"
+#include "cgs_common.h"
+
+#define ICELAND_SMC_SIZE   0x2
+#define BUFFER_SIZE8
+#define MAX_STRING_SIZE15
+#define BUFFER_SIZETWO 131072 /*128 *1024*/
+
+/**
+ * Set the address for reading/writing the SMC SRAM space.
+ * @paramsmumgr  the address of the powerplay hardware manager.
+ * @paramsmcAddress the address in the SMC RAM to access.
+ */
+static int iceland_set_smc_sram_address(struct pp_smumgr *smumgr,
+   uint32_t smcAddress, uint32_t limit)
+{
+   if (smumgr == NULL || smumgr->device == NULL)
+   return -EINVAL;
+   PP_ASSERT_WITH_CODE((0 == (3 & smcAddress)),
+   "SMC address must be 4 byte aligned.",
+   return -1;);
+
+   PP_ASSERT_WITH_CODE((limit > (smcAddress + 3)),
+   "SMC address is beyond the SMC RAM area.",
+   return -1;);
+
+   cgs_write_register(smumgr->device, mmSMC_IND_INDEX_0, smcAddress);
+   SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, 
AUTO_INCREMENT_IND_0, 0);
+
+   return 0;
+}
+
+/**
+ * Copy bytes from an array into the SMC RAM space.
+ *
+ * @paramsmumgr  the address of the powerplay SMU manager.
+ * @paramsmcStartAddress the start address in the SMC RAM to copy bytes to.
+ * @paramsrc the byte array to copy the bytes from.
+ * @parambyteCount the number of bytes to copy.
+ */
+int iceland_copy_bytes_to_smc(struct pp_smumgr *smumgr,
+   uint32_t smcStartAddress, const uint8_t *src,
+   uint32_t byteCount, uint32_t limit)
+{
+   uint32_t addr;
+   uint32_t data, orig_data;
+   int result = 0;
+   uint32_t extra_shift;
+
+   if (smumgr == NULL || smumgr->device == NULL)
+   return -EINVAL;
+   PP_ASSERT_WITH_CODE((0 == 

[Part II PATCH 00/15] drm/amd/powerplay: Introduce iceland powerplay support

2016-07-28 Thread Huang Rui
From: Huang Rui 

Hi all,

Part I: https://lists.freedesktop.org/archives/amd-gfx/2016-July/000569.html

After adding the prep (Part I) of iceland powerplay, this part (Part II)
introduces iceland powerplay support, and there are two sub-parts:

1) Patch 1 - 9: add SMU and HW manager function support.
2) Patch 10 - 15: resolve the DPM issue.

Thanks,
Rui

Huang Rui (15):
  drm/amd/powerplay: add SMU71 header files for iceland (v2)
  drm/amd/powerplay: add iceland SMU manager
  drm/amdgpu: add new definition in bif header
  drm/amd/powerplay: add iceland_dyn_defaults header
  drm/amdgpu: make amdgpu_cgs_call_acpi_method as static
  drm/amdgpu: fix incorrect type of info_id
  drm/amdgpu: add query device id and revision id into system info entry
at CGS
  drm/amd/powerplay: add iceland HW manager
  drm/amdgpu: enable iceland powerplay manually
  drm/amd/powerplay: rename smum header guards
  drm/amd/powerplay: fix the incorrect checking condition
  drm/amd/powerplay: add deep sleep initialization
  drm/amd/powerplay: set the platform capability flags for iceland
  drm/amd/powerplay: add enabling voltage controller back
  drm/amd/powerplay: add DPM running checking back

 drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c|   10 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c  |4 +-
 drivers/gpu/drm/amd/amdgpu/iceland_smum.h  |4 +-
 .../gpu/drm/amd/include/asic_reg/bif/bif_5_0_d.h   |1 +
 drivers/gpu/drm/amd/include/cgs_common.h   |   12 +-
 drivers/gpu/drm/amd/powerplay/hwmgr/Makefile   |4 +-
 drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c|4 +
 .../amd/powerplay/hwmgr/iceland_clockpowergating.c |  119 +
 .../amd/powerplay/hwmgr/iceland_clockpowergating.h |   38 +
 .../drm/amd/powerplay/hwmgr/iceland_dyn_defaults.h |   41 +
 .../gpu/drm/amd/powerplay/hwmgr/iceland_hwmgr.c| 5698 
 .../gpu/drm/amd/powerplay/hwmgr/iceland_hwmgr.h|  424 ++
 .../drm/amd/powerplay/hwmgr/iceland_powertune.c|  491 ++
 .../drm/amd/powerplay/hwmgr/iceland_powertune.h|   74 +
 .../gpu/drm/amd/powerplay/hwmgr/iceland_thermal.c  |  595 ++
 .../gpu/drm/amd/powerplay/hwmgr/iceland_thermal.h  |   58 +
 drivers/gpu/drm/amd/powerplay/inc/smu71.h  |  510 ++
 drivers/gpu/drm/amd/powerplay/inc/smu71_discrete.h |  631 +++
 drivers/gpu/drm/amd/powerplay/smumgr/Makefile  |3 +-
 .../gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c  |  713 +++
 .../gpu/drm/amd/powerplay/smumgr/iceland_smumgr.h  |   64 +
 drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c  |4 +
 22 files changed, 9490 insertions(+), 12 deletions(-)
 create mode 100644 
drivers/gpu/drm/amd/powerplay/hwmgr/iceland_clockpowergating.c
 create mode 100644 
drivers/gpu/drm/amd/powerplay/hwmgr/iceland_clockpowergating.h
 create mode 100644 drivers/gpu/drm/amd/powerplay/hwmgr/iceland_dyn_defaults.h
 create mode 100644 drivers/gpu/drm/amd/powerplay/hwmgr/iceland_hwmgr.c
 create mode 100644 drivers/gpu/drm/amd/powerplay/hwmgr/iceland_hwmgr.h
 create mode 100644 drivers/gpu/drm/amd/powerplay/hwmgr/iceland_powertune.c
 create mode 100644 drivers/gpu/drm/amd/powerplay/hwmgr/iceland_powertune.h
 create mode 100644 drivers/gpu/drm/amd/powerplay/hwmgr/iceland_thermal.c
 create mode 100644 drivers/gpu/drm/amd/powerplay/hwmgr/iceland_thermal.h
 create mode 100644 drivers/gpu/drm/amd/powerplay/inc/smu71.h
 create mode 100644 drivers/gpu/drm/amd/powerplay/inc/smu71_discrete.h
 create mode 100644 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c
 create mode 100644 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.h

-- 
2.7.4

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[Part II PATCH 03/15] drm/amdgpu: add new definition in bif header

2016-07-28 Thread Huang Rui
This patch adds new definition in bif header, and will be used on
iceland HW powertune part.

Signed-off-by: Huang Rui 
---
 drivers/gpu/drm/amd/include/asic_reg/bif/bif_5_0_d.h | 1 +
 1 file changed, 1 insertion(+)

diff --git a/drivers/gpu/drm/amd/include/asic_reg/bif/bif_5_0_d.h 
b/drivers/gpu/drm/amd/include/asic_reg/bif/bif_5_0_d.h
index 2933297..809759f 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/bif/bif_5_0_d.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/bif/bif_5_0_d.h
@@ -27,6 +27,7 @@
 #define mmMM_INDEX 
 0x0
 #define mmMM_INDEX_HI  
 0x6
 #define mmMM_DATA  
 0x1
+#define mmCC_BIF_BX_STRAP2 
0x152A
 #define mmBIF_MM_INDACCESS_CNTL
 0x1500
 #define mmBIF_DOORBELL_APER_EN 
 0x1501
 #define mmBUS_CNTL 
 0x1508
-- 
2.7.4

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[Part II PATCH 07/15] drm/amdgpu: add query device id and revision id into system info entry at CGS

2016-07-28 Thread Huang Rui
This patch adds the device id and revision id into the system info entry at
CGS, making it possible to get the PCI device id and revision id from
amdgpu; it might expose more info in the future.

PCI device id will be also used on powerplay part at current.

Suggested-by: Alex Deucher 
Signed-off-by: Huang Rui 
Reviewed-by: Alex Deucher 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c  | 8 +++-
 drivers/gpu/drm/amd/include/cgs_common.h | 2 ++
 2 files changed, 9 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
index fc22d39..881e38c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
@@ -810,7 +810,7 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device 
*cgs_device,
 }
 
 static int amdgpu_cgs_query_system_info(struct cgs_device *cgs_device,
-   struct cgs_system_info *sys_info)
+   struct cgs_system_info *sys_info)
 {
CGS_FUNC_ADEV;
 
@@ -830,6 +830,12 @@ static int amdgpu_cgs_query_system_info(struct cgs_device 
*cgs_device,
case CGS_SYSTEM_INFO_PCIE_MLW:
sys_info->value = adev->pm.pcie_mlw_mask;
break;
+   case CGS_SYSTEM_INFO_PCIE_DEV:
+   sys_info->value = adev->pdev->device;
+   break;
+   case CGS_SYSTEM_INFO_PCIE_REV:
+   sys_info->value = adev->pdev->revision;
+   break;
case CGS_SYSTEM_INFO_CG_FLAGS:
sys_info->value = adev->cg_flags;
break;
diff --git a/drivers/gpu/drm/amd/include/cgs_common.h 
b/drivers/gpu/drm/amd/include/cgs_common.h
index ed5fa33..b86aba9 100644
--- a/drivers/gpu/drm/amd/include/cgs_common.h
+++ b/drivers/gpu/drm/amd/include/cgs_common.h
@@ -113,6 +113,8 @@ enum cgs_system_info_id {
CGS_SYSTEM_INFO_ADAPTER_BDF_ID = 1,
CGS_SYSTEM_INFO_PCIE_GEN_INFO,
CGS_SYSTEM_INFO_PCIE_MLW,
+   CGS_SYSTEM_INFO_PCIE_DEV,
+   CGS_SYSTEM_INFO_PCIE_REV,
CGS_SYSTEM_INFO_CG_FLAGS,
CGS_SYSTEM_INFO_PG_FLAGS,
CGS_SYSTEM_INFO_GFX_CU_INFO,
-- 
2.7.4

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[Part II PATCH 09/15] drm/amdgpu: enable iceland powerplay manually

2016-07-28 Thread Huang Rui
It's able to enable iceland powerplay manually via the module
parameter. The default state is disabled.

Signed-off-by: Huang Rui 
Reviewed-by: Alex Deucher 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
index b04f6f1..afeff95 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
@@ -110,13 +110,15 @@ static int amdgpu_pp_early_init(void *handle)
case CHIP_STONEY:
adev->pp_enabled = (amdgpu_powerplay == 0) ? false : true;
break;
+   case CHIP_TOPAZ:
+   adev->pp_enabled = (amdgpu_powerplay > 0) ? true : false;
+   break;
/* These chips don't have powerplay implemenations */
case CHIP_BONAIRE:
case CHIP_HAWAII:
case CHIP_KABINI:
case CHIP_MULLINS:
case CHIP_KAVERI:
-   case CHIP_TOPAZ:
default:
adev->pp_enabled = false;
break;
-- 
2.7.4

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[Part II PATCH 10/15] drm/amd/powerplay: rename smum header guards

2016-07-28 Thread Huang Rui
This patch renames the smum header guards to align with the file name.

Reported-by: Edward O'Callaghan 
Signed-off-by: Huang Rui 
Reviewed-by: Ken Wang 
---
 drivers/gpu/drm/amd/amdgpu/iceland_smum.h | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_smum.h 
b/drivers/gpu/drm/amd/amdgpu/iceland_smum.h
index 1e0769e..5983e31 100644
--- a/drivers/gpu/drm/amd/amdgpu/iceland_smum.h
+++ b/drivers/gpu/drm/amd/amdgpu/iceland_smum.h
@@ -21,8 +21,8 @@
  *
  */
 
-#ifndef ICELAND_SMUMGR_H
-#define ICELAND_SMUMGR_H
+#ifndef ICELAND_SMUM_H
+#define ICELAND_SMUM_H
 
 #include "ppsmc.h"
 
-- 
2.7.4

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


RE: [PATCH 4/4] drm/amd/powerplay: Prevent UVD powerdown before init

2016-07-28 Thread Zhu, Rex

From: amd-gfx [mailto:amd-gfx-boun...@lists.freedesktop.org] On Behalf Of Alex 
Deucher
Sent: Thursday, July 28, 2016 1:46 PM
To: Tom St Denis
Cc: StDenis, Tom; amd-gfx list
Subject: Re: [PATCH 4/4] drm/amd/powerplay: Prevent UVD powerdown before init

On Tue, Jul 26, 2016 at 11:38 AM, Tom St Denis  wrote:
> Because of the ip_blocks init order powerplay would power down the UVD 
> block before UVD start is called.  This results in a VCPU hang.
>
> This patch prevents power down before UVD is initialized.
>
> Also correct the power up order so clocking is set after power is 
> ungated.
>
> With this applied comparable clock/power behaviour to powerplay=0 with 
> DPM is observed.
>
> Signed-off-by: Tom St Denis 

This patch needs to be split into several patches and reworked a bit.
Also, don't include amdgpu.h in powerplay.  We have cgs for access to registers 
and data from adev, etc.  The idea is to minimize the dependencies between 
components.  We shouldn't be accessing adev directly in powerplay.  A couple 
more comments inline.


Rex:  I also think so. 
1. We can move 
+   WREG32(mmUVD_POWER_STATUS,
+   UVD_POWER_STATUS__UVD_PG_EN_MASK |
+   UVD_POWER_STATUS__UVD_PG_MODE_MASK);
+   else
+   WREG32(mmUVD_POWER_STATUS,
+   UVD_POWER_STATUS__UVD_PG_EN_MASK);
to uvd_v6_0_start.  no need to visit adev in powerplay and dpm.  And uvd test 
also can pass.

2.  for the lock, we can just use pm.mutex.

3.  please also delete enable_clock_power_gatings_tasks in resume_action_chain 
in a separate patch for powerplay.

4.  do we need to add cg_state, pg_state? 



Best Regards
Rex


> ---
>  drivers/gpu/drm/amd/amdgpu/amdgpu.h|  6 ++
>  drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c|  5 -
>  drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c  |  8 ---
>  drivers/gpu/drm/amd/amdgpu/vi.c| 12 ---
>  .../drm/amd/powerplay/hwmgr/cz_clockpowergating.c  | 25 
> ++
>  drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c |  7 ++
>  6 files changed, 43 insertions(+), 20 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h 
> b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
> index d0460ea2f85b..5616b16e6c0a 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
> @@ -1692,6 +1692,7 @@ struct amdgpu_uvd {
> uint32_tsrbm_soft_reset;
> int cg_state, pg_state;
> struct mutexpg_lock;
> +   boolis_init;
>  };
>
>  /*
> @@ -2518,5 +2519,10 @@ int amdgpu_dm_display_resume(struct 
> amdgpu_device *adev );  static inline int 
> amdgpu_dm_display_resume(struct amdgpu_device *adev) { return 0; }  
> #endif
>
> +struct amdgpu_cgs_device {
> +   struct cgs_device base;
> +   struct amdgpu_device *adev;
> +};
> +
>  #include "amdgpu_object.h"
>  #endif
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c 
> b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
> index ee95e950a19b..d553e399a835 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
> @@ -33,11 +33,6 @@
>  #include "atom.h"
>  #include "amdgpu_ucode.h"
>
> -struct amdgpu_cgs_device {
> -   struct cgs_device base;
> -   struct amdgpu_device *adev;
> -};
> -
>  #define CGS_FUNC_ADEV  \
> struct amdgpu_device *adev =\
> ((struct amdgpu_cgs_device *)cgs_device)->adev diff 
> --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c 
> b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
> index 422d5300b92e..3b93327c5e25 100644
> --- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
> +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
> @@ -389,9 +389,9 @@ static int uvd_v6_0_start(struct amdgpu_device *adev)
> uint32_t mp_swap_cntl;
> int i, j, r;
>
> -   /* is power gated? then we can't start (TODO: re-enable power) */
> -   if (adev->uvd.pg_state)
> -   return -EINVAL;
> +   /* is power gated? then we can't start but don't return an error */
> +   if (adev->uvd.is_init && adev->uvd.pg_state)
> +   return 0;
>
> /* set CG state to -1 for unset */
> adev->uvd.cg_state = -1;
> @@ -662,6 +662,8 @@ static int uvd_v6_0_ring_test_ring(struct amdgpu_ring 
> *ring)
>   ring->idx, tmp);
> r = -EINVAL;
> }
> +   if (!r)
> +   adev->uvd.is_init = true;
> return r;
>  }
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c 
> b/drivers/gpu/drm/amd/amdgpu/vi.c index 78fea940d120..f4fdde0641b0 
> 100644
> --- a/drivers/gpu/drm/amd/amdgpu/vi.c
> +++ b/drivers/gpu/drm/amd/amdgpu/vi.c
> @@ -1583,10 +1583,8 @@ static int vi_common_early_init(void *handle)
> if (adev->re

[PATCH 2/4] drm/amdgpu: add module parameters to ctrl powerplay feature

2016-07-28 Thread Rex Zhu
Change-Id: I520ff615c5fe1dd78ef2e97c60437347a4994a5a
Signed-off-by: Rex Zhu 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c|  4 
 drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c  |  1 -
 drivers/gpu/drm/amd/powerplay/amd_powerplay.c  |  1 +
 drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c   |  3 ---
 .../gpu/drm/amd/powerplay/hwmgr/fiji_powertune.c   |  5 ++---
 drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c| 22 +-
 .../drm/amd/powerplay/hwmgr/iceland_powertune.c|  5 ++---
 .../gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c  | 10 --
 drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h  |  2 +-
 drivers/gpu/drm/amd/powerplay/inc/hwmgr.h  |  4 +++-
 10 files changed, 34 insertions(+), 23 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index b71a970..f2af1ff 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -85,6 +85,7 @@ int amdgpu_sched_jobs = 32;
 int amdgpu_sched_hw_submission = 2;
 int amdgpu_powerplay = -1;
 int amdgpu_powercontainment = 1;
+int amdgpu_sclk_deep_sleep_en = 1;
 unsigned amdgpu_pcie_gen_cap = 0;
 unsigned amdgpu_pcie_lane_cap = 0;
 unsigned amdgpu_cg_mask = 0x;
@@ -173,6 +174,9 @@ MODULE_PARM_DESC(powercontainment, "Power Containment (1 = 
enable (default), 0 =
 module_param_named(powercontainment, amdgpu_powercontainment, int, 0444);
 #endif
 
+MODULE_PARM_DESC(sclkdeepsleep, "SCLK Deep Sleep (1 = enable (default), 0 = 
disable)");
+module_param_named(sclkdeepsleep, amdgpu_sclk_deep_sleep_en, int, 0444);
+
 MODULE_PARM_DESC(pcie_gen_cap, "PCIE Gen Caps (0: autodetect (default))");
 module_param_named(pcie_gen_cap, amdgpu_pcie_gen_cap, uint, 0444);
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
index afeff95..02bd62a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
@@ -52,7 +52,6 @@ static int amdgpu_powerplay_init(struct amdgpu_device *adev)
pp_init->chip_family = adev->family;
pp_init->chip_id = adev->asic_type;
pp_init->device = amdgpu_cgs_create_device(adev);
-   pp_init->powercontainment_enabled = amdgpu_powercontainment;
 
ret = amd_powerplay_init(pp_init, amd_pp);
kfree(pp_init);
diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c 
b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
index f9e03ad..e6436ee 100644
--- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
@@ -31,6 +31,7 @@
 #include "eventmanager.h"
 #include "pp_debug.h"
 
+
 #define PP_CHECK(handle)   \
do {\
if ((handle) == NULL || (handle)->pp_valid != PP_VALID) \
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c 
b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c
index 744aa88..9ff55f8 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c
@@ -619,9 +619,6 @@ static int fiji_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_TablelessHardwareInterface);
 
-   phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-   PHM_PlatformCaps_SclkDeepSleep);
-
data->gpio_debug = 0;
 
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.c 
b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.c
index 4465845..c3b2f51 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.c
@@ -77,9 +77,8 @@ void fiji_initialize_power_tune_defaults(struct pp_hwmgr 
*hwmgr)
 
fiji_hwmgr->fast_watermark_threshold = 100;
 
-   if (hwmgr->powercontainment_enabled) {
-   phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-   PHM_PlatformCaps_PowerContainment);
+   if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+   PHM_PlatformCaps_PowerContainment)) {
tmp = 1;
fiji_hwmgr->enable_dte_feature = tmp ? false : true;
fiji_hwmgr->enable_tdc_limit_feature = tmp ? true : 
false;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c 
b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
index 411c267..98ff749 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
@@ -41,6 +41,25 @@ extern int fiji_hwmgr_init(struct pp_hwmgr *hwmgr);
 extern int polaris10_hwmgr_init(struct pp_hwmgr *hwmgr);
 extern 

[PATCH 1/4] drm/amd/powerplay: fix typos of volt/millivolt symbols in comment.

2016-07-28 Thread Rex Zhu
Change-Id: Ife6002b68b1e64f76886e6e421b3e6f5f454eda9
Signed-off-by: Rex Zhu 
Reviewed-by: Alexandre Demers 
---
 drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c 
b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c
index e09c231..c81e8a5 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c
@@ -2908,8 +2908,8 @@ static int polaris10_get_evv_voltages(struct pp_hwmgr 
*hwmgr)
continue;
}
 
-   /* need to make sure vddc is less than 2v or else, it 
could burn the ASIC.
-* real voltage level in unit of 0.01mv */
+   /* need to make sure vddc is less than 2V or else, it 
could burn the ASIC.
+* real voltage level in unit of 0.01mV */
PP_ASSERT_WITH_CODE((vddc < 20 && vddc != 0),
"Invalid VDDC value", result = 
-EINVAL;);
 
-- 
1.9.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH 4/4] drm/amd/powerplay: not change uvd/vce block's state when initialize/resume

2016-07-28 Thread Rex Zhu
Change-Id: I6e338a5faeb023b13bb450ecb1c4bb3eaa3b0ac5
Signed-off-by: Rex Zhu 
---
 drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c | 2 --
 1 file changed, 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c 
b/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c
index d6635cc..635fc4b 100644
--- a/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c
+++ b/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c
@@ -30,7 +30,6 @@ static const pem_event_action * const initialize_event[] = {
system_config_tasks,
setup_asic_tasks,
enable_dynamic_state_management_tasks,
-   enable_clock_power_gatings_tasks,
get_2d_performance_state_tasks,
set_performance_state_tasks,
initialize_thermal_controller_tasks,
@@ -140,7 +139,6 @@ static const pem_event_action * const resume_event[] = {
setup_asic_tasks,
enable_stutter_mode_tasks, /*must do this in boot state and before SMC 
is started */
enable_dynamic_state_management_tasks,
-   enable_clock_power_gatings_tasks,
enable_disable_bapm_tasks,
initialize_thermal_controller_tasks,
get_2d_performance_state_tasks,
-- 
1.9.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH 3/4] drm/amdgpu: use modules parameter to ctrl deep sleep feature in dpm

2016-07-28 Thread Rex Zhu
Change-Id: I44a5a7fef33fdb1dce9e5f753d45c982f8743c08
Signed-off-by: Rex Zhu 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu.h | 1 +
 drivers/gpu/drm/amd/amdgpu/ci_dpm.c | 5 -
 drivers/gpu/drm/amd/amdgpu/cz_dpm.c | 6 +-
 drivers/gpu/drm/amd/amdgpu/kv_dpm.c | 6 +-
 4 files changed, 15 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index d406ec7..42b9560 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -92,6 +92,7 @@ extern unsigned amdgpu_pcie_gen_cap;
 extern unsigned amdgpu_pcie_lane_cap;
 extern unsigned amdgpu_cg_mask;
 extern unsigned amdgpu_pg_mask;
+extern int amdgpu_sclk_deep_sleep_en;
 
 #define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS 3000
 #define AMDGPU_MAX_USEC_TIMEOUT10  /* 100 ms */
diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c 
b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
index 5d4ec41..98ec65b 100644
--- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
@@ -5873,7 +5873,10 @@ static int ci_dpm_init(struct amdgpu_device *adev)
pi->pcie_dpm_key_disabled = 0;
pi->thermal_sclk_dpm_enabled = 0;
 
-   pi->caps_sclk_ds = true;
+   if (amdgpu_sclk_deep_sleep_en)
+   pi->caps_sclk_ds = true;
+   else
+   pi->caps_sclk_ds = false;
 
pi->mclk_strobe_mode_threshold = 4;
pi->mclk_stutter_mode_threshold = 4;
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c 
b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
index 8ba07e7..74afb63 100644
--- a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
@@ -435,7 +435,11 @@ static int cz_dpm_init(struct amdgpu_device *adev)
pi->caps_td_ramping = true;
pi->caps_tcp_ramping = true;
}
-   pi->caps_sclk_ds = true;
+   if (amdgpu_sclk_deep_sleep_en)
+   pi->caps_sclk_ds = true;
+   else
+   pi->caps_sclk_ds = false;
+
pi->voting_clients = 0x00c00033;
pi->auto_thermal_throttling_enabled = true;
pi->bapm_enabled = false;
diff --git a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c 
b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
index a845e88..f8618a3 100644
--- a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
@@ -2845,7 +2845,11 @@ static int kv_dpm_init(struct amdgpu_device *adev)
pi->caps_tcp_ramping = true;
}
 
-   pi->caps_sclk_ds = true;
+   if (amdgpu_sclk_deep_sleep_en)
+   pi->caps_sclk_ds = true;
+   else
+   pi->caps_sclk_ds = false;
+
pi->enable_auto_thermal_throttling = true;
pi->disable_nb_ps3_in_battery = false;
if (amdgpu_bapm == 0)
-- 
1.9.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH 00/13] shadow page table support

2016-07-28 Thread Chunming Zhou
Since we cannot make sure VRAM is safe after gpu reset, page table backup
is necessary; a shadow page table is a sensible way to recover the page table
when a gpu reset happens.
We need to allocate a GTT bo as the shadow of the VRAM bo when creating a page
table, and keep them in sync. After gpu reset, we will need to use SDMA to copy
the GTT bo content to the VRAM bo, so the page table will be recovered.

TODO: the gart table should be saved as well; a separate patch set will be generated.

Chunming Zhou (13):
  drm/amdgpu: irq resume should be immediately after gpu resume
  drm/amdgpu: add shadow bo support
  drm/amdgpu: set shadow flag for pd/pt bo
  drm/amdgpu: update shadow pt bo while update pt
  drm/amdgpu: update pd shadow while updating pd
  drm/amdgpu: implement amdgpu_vm_recover_page_table_from_shadow
  drm/amdgpu: link all vm clients
  drm/amdgpu: add vm_list_lock
  drm/amd: add block entity function
  drm/amdgpu: recover page tables after gpu reset
  drm/amd: wait necessary dependency before running job
  drm/amdgpu: add vm recover pt fence
  drm/amdgpu: add backup condition for shadow page table

 drivers/gpu/drm/amd/amdgpu/amdgpu.h   |  15 ++
 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c|   6 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c|  33 +++-
 drivers/gpu/drm/amd/amdgpu/amdgpu_job.c   |   5 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_object.c|  36 -
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c| 219 ++
 drivers/gpu/drm/amd/scheduler/gpu_scheduler.c |  30 +++-
 drivers/gpu/drm/amd/scheduler/gpu_scheduler.h |   3 +
 include/uapi/drm/amdgpu_drm.h |   2 +
 9 files changed, 316 insertions(+), 33 deletions(-)

-- 
1.9.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH 02/13] drm/amdgpu: add shadow bo support

2016-07-28 Thread Chunming Zhou
A shadow bo is the shadow of a bo, always placed in GTT,
which can be used to back up the original bo.

Change-Id: Ia27d4225c47ff41d3053eb691276e29fb2d64026
Signed-off-by: Chunming Zhou 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu.h|  1 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 36 +++---
 include/uapi/drm/amdgpu_drm.h  |  2 ++
 3 files changed, 36 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index b7d9e66..7b4a0cf 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -500,6 +500,7 @@ struct amdgpu_bo {
struct amdgpu_device*adev;
struct drm_gem_object   gem_base;
struct amdgpu_bo*parent;
+   struct amdgpu_bo*shadow;
 
struct ttm_bo_kmap_obj  dma_buf_vmap;
struct amdgpu_mn*mn;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 67de19c..6e59b4e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -356,6 +356,7 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
 {
struct ttm_placement placement = {0};
struct ttm_place placements[AMDGPU_GEM_DOMAIN_MAX + 1];
+   int r;
 
memset(&placements, 0,
   (AMDGPU_GEM_DOMAIN_MAX + 1) * sizeof(struct ttm_place));
@@ -363,9 +364,32 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
amdgpu_ttm_placement_init(adev, &placement,
  placements, domain, flags);
 
-   return amdgpu_bo_create_restricted(adev, size, byte_align, kernel,
-  domain, flags, sg, &placement,
-  resv, bo_ptr);
+   r = amdgpu_bo_create_restricted(adev, size, byte_align, kernel,
+   domain, flags, sg, &placement,
+   resv, bo_ptr);
+   if (r)
+   return r;
+
+   if (flags & AMDGPU_GEM_CREATE_SHADOW) {
+   memset(&placements, 0,
+  (AMDGPU_GEM_DOMAIN_MAX + 1) * sizeof(struct ttm_place));
+
+   amdgpu_ttm_placement_init(adev, &placement,
+ placements, AMDGPU_GEM_DOMAIN_GTT,
+ AMDGPU_GEM_CREATE_CPU_GTT_USWC);
+
+   r = amdgpu_bo_create_restricted(adev, size, byte_align, kernel,
+   AMDGPU_GEM_DOMAIN_GTT,
+   AMDGPU_GEM_CREATE_CPU_GTT_USWC,
+   NULL, &placement,
+   (*bo_ptr)->tbo.resv,
+   &(*bo_ptr)->shadow);
+   if (r)
+   amdgpu_bo_unref(bo_ptr);
+   } else
+   (*bo_ptr)->shadow = NULL;
+
+   return 0;
 }
 
 int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
@@ -422,6 +446,12 @@ void amdgpu_bo_unref(struct amdgpu_bo **bo)
 
if ((*bo) == NULL)
return;
+   if ((*bo)->flags & AMDGPU_GEM_CREATE_SHADOW) {
+   tbo = &((*bo)->shadow->tbo);
+   ttm_bo_unref(&tbo);
+   if (tbo == NULL)
+   (*bo)->shadow = NULL;
+   }
 
tbo = &((*bo)->tbo);
ttm_bo_unref(&tbo);
diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h
index 946f238..2d756d9 100644
--- a/include/uapi/drm/amdgpu_drm.h
+++ b/include/uapi/drm/amdgpu_drm.h
@@ -75,6 +75,8 @@
 #define AMDGPU_GEM_CREATE_NO_CPU_ACCESS(1 << 1)
 /* Flag that USWC attributes should be used for GTT */
 #define AMDGPU_GEM_CREATE_CPU_GTT_USWC (1 << 2)
+/* Flag that create shadow bo(GTT) while allocating vram bo */
+#define AMDGPU_GEM_CREATE_SHADOW   (1 << 3)
 
 struct drm_amdgpu_gem_create_in  {
/** the requested memory size */
-- 
1.9.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH 01/13] drm/amdgpu: irq resume should be immediately after gpu resume

2016-07-28 Thread Chunming Zhou
Change-Id: Icf64bf5964f0ef66c239ab0679d51275cc272699
Signed-off-by: Chunming Zhou 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index eab931a..449ea00 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -2143,6 +2143,7 @@ retry:
amdgpu_atombios_scratch_regs_restore(adev);
}
if (!r) {
+   amdgpu_irq_gpu_reset_resume_helper(adev);
r = amdgpu_ib_ring_tests(adev);
if (r) {
dev_err(adev->dev, "ib ring test failed (%d).\n", r);
@@ -2177,7 +2178,6 @@ retry:
/* bad news, how to tell it to userspace ? */
dev_info(adev->dev, "GPU reset failed\n");
}
-   amdgpu_irq_gpu_reset_resume_helper(adev);
 
return r;
 }
-- 
1.9.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH 04/13] drm/amdgpu: update shadow pt bo while update pt

2016-07-28 Thread Chunming Zhou
Change-Id: I8245cdad490d2a0b8cf4b9320e53e14db0b6add4
Signed-off-by: Chunming Zhou 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 16 
 1 file changed, 12 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index aedd1cb..e7a400d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -651,6 +651,7 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device 
*adev,
if (vm->page_tables[pt_idx].addr == pt)
continue;
vm->page_tables[pt_idx].addr = pt;
+   vm->page_tables[pt_idx].addr_shadow = pt;
 
pde = pd_addr + pt_idx * 8;
if (((last_pde + 8 * count) != pde) ||
@@ -801,7 +802,7 @@ static void amdgpu_vm_update_ptes(struct amdgpu_device 
*adev,
*vm_update_params,
  struct amdgpu_vm *vm,
  uint64_t start, uint64_t end,
- uint64_t dst, uint32_t flags)
+ uint64_t dst, uint32_t flags, bool shadow)
 {
const uint64_t mask = AMDGPU_VM_PTE_COUNT - 1;
 
@@ -815,7 +816,8 @@ static void amdgpu_vm_update_ptes(struct amdgpu_device 
*adev,
/* initialize the variables */
addr = start;
pt_idx = addr >> amdgpu_vm_block_size;
-   pt = vm->page_tables[pt_idx].entry.robj;
+   pt = shadow ? vm->page_tables[pt_idx].entry_shadow.robj :
+   vm->page_tables[pt_idx].entry.robj;
 
if ((addr & ~mask) == (end & ~mask))
nptes = end - addr;
@@ -834,7 +836,8 @@ static void amdgpu_vm_update_ptes(struct amdgpu_device 
*adev,
/* walk over the address space and update the page tables */
while (addr < end) {
pt_idx = addr >> amdgpu_vm_block_size;
-   pt = vm->page_tables[pt_idx].entry.robj;
+   pt = shadow ? vm->page_tables[pt_idx].entry_shadow.robj :
+   vm->page_tables[pt_idx].entry.robj;
 
if ((addr & ~mask) == (end & ~mask))
nptes = end - addr;
@@ -941,6 +944,8 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device 
*adev,
/* two extra commands for begin/end of fragment */
ndw += 2 * 10;
}
+   /* double ndw, since need to update shadow pt bo as well */
+   ndw *= 2;
 
r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job);
if (r)
@@ -960,9 +965,12 @@ static int amdgpu_vm_bo_update_mapping(struct 
amdgpu_device *adev,
r = reservation_object_reserve_shared(vm->page_directory->tbo.resv);
if (r)
goto error_free;
+   /* update shadow pt bo */
+   amdgpu_vm_update_ptes(adev, &vm_update_params, vm, start,
+ last + 1, addr, flags, true);
 
amdgpu_vm_update_ptes(adev, &vm_update_params, vm, start,
- last + 1, addr, flags);
+ last + 1, addr, flags, false);
 
amdgpu_ring_pad_ib(ring, vm_update_params.ib);
WARN_ON(vm_update_params.ib->length_dw > ndw);
-- 
1.9.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH 09/13] drm/amd: add block entity function

2016-07-28 Thread Chunming Zhou
Change-Id: Ia0378640962eef362569e0bbe090aea1ca083a55
Signed-off-by: Chunming Zhou 
---
 drivers/gpu/drm/amd/scheduler/gpu_scheduler.c | 24 
 drivers/gpu/drm/amd/scheduler/gpu_scheduler.h |  3 +++
 2 files changed, 27 insertions(+)

diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c 
b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
index f96aa82..71b5f1a 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
@@ -110,6 +110,26 @@ amd_sched_rq_select_entity(struct amd_sched_rq *rq)
 }
 
 /**
+ * block all entity of this run queue
+ *
+ * @rq The run queue to check.
+ *
+ */
+int amd_sched_rq_block_entity(struct amd_sched_rq *rq, bool block)
+{
+   struct amd_sched_entity *entity;
+
+   spin_lock(&rq->lock);
+
+   list_for_each_entry(entity, &rq->entities, list)
+   entity->block = block;
+
+   spin_unlock(&rq->lock);
+
+   return 0;
+}
+
+/**
  * Init a context entity used by scheduler when submit to HW ring.
  *
  * @sched  The pointer to the scheduler
@@ -134,6 +154,7 @@ int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
INIT_LIST_HEAD(&entity->list);
entity->rq = rq;
entity->sched = sched;
+   entity->block = false;
 
spin_lock_init(&entity->queue_lock);
r = kfifo_alloc(&entity->job_queue, jobs * sizeof(void *), GFP_KERNEL);
@@ -186,6 +207,9 @@ static bool amd_sched_entity_is_idle(struct 
amd_sched_entity *entity)
  */
 static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity)
 {
+   if (entity->block)
+   return false;
+
if (kfifo_is_empty(&entity->job_queue))
return false;
 
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h 
b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
index 7cbbbfb..a1c0073 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
@@ -52,6 +52,8 @@ struct amd_sched_entity {
 
struct fence*dependency;
struct fence_cb cb;
+
+   boolblock;
 };
 
 /**
@@ -155,4 +157,5 @@ int amd_sched_job_init(struct amd_sched_job *job,
   void *owner);
 void amd_sched_hw_job_reset(struct amd_gpu_scheduler *sched);
 void amd_sched_job_recovery(struct amd_gpu_scheduler *sched);
+int amd_sched_rq_block_entity(struct amd_sched_rq *rq, bool block);
 #endif
-- 
1.9.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH 05/13] drm/amdgpu: update pd shadow while updating pd

2016-07-28 Thread Chunming Zhou
Change-Id: Icafa90a6625ea7b5ab3e360ba0d73544cda251b0
Signed-off-by: Chunming Zhou 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu.h|  1 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 68 +++---
 2 files changed, 48 insertions(+), 21 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 4b3c6d2..a7951aa 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -891,6 +891,7 @@ struct amdgpu_vm {
 
/* contains the page directory */
struct amdgpu_bo*page_directory;
+   struct amdgpu_bo_list_entry pd_entry_shadow;
unsignedmax_pde_used;
struct fence*page_directory_fence;
uint64_tlast_eviction_counter;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index e7a400d..fb8a7ab 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -138,13 +138,15 @@ void amdgpu_vm_get_pt_bos(struct amdgpu_device *adev, 
struct amdgpu_vm *vm,
/* add the vm page table to the list */
for (i = 0; i <= vm->max_pde_used; ++i) {
struct amdgpu_bo_list_entry *entry = &vm->page_tables[i].entry;
+   struct amdgpu_bo_list_entry *entry_shadow = 
&vm->page_tables[i].entry_shadow;
 
-   if (!entry->robj)
+   if (!entry->robj || !entry_shadow->robj)
continue;
 
list_add(&entry->tv.head, duplicates);
+   list_add(&entry_shadow->tv.head, duplicates);
}
-
+   list_add(&vm->pd_entry_shadow.tv.head, duplicates);
 }
 
 /**
@@ -597,23 +599,13 @@ uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, 
uint64_t addr)
return result;
 }
 
-/**
- * amdgpu_vm_update_pdes - make sure that page directory is valid
- *
- * @adev: amdgpu_device pointer
- * @vm: requested vm
- * @start: start of GPU address range
- * @end: end of GPU address range
- *
- * Allocates new page tables if necessary
- * and updates the page directory.
- * Returns 0 for success, error for failure.
- */
-int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
-   struct amdgpu_vm *vm)
+
+static int amdgpu_vm_update_pd_or_shadow(struct amdgpu_device *adev,
+struct amdgpu_vm *vm, bool shadow)
 {
struct amdgpu_ring *ring;
-   struct amdgpu_bo *pd = vm->page_directory;
+   struct amdgpu_bo *pd = shadow ? vm->page_directory->shadow :
+   vm->page_directory;
uint64_t pd_addr = amdgpu_bo_gpu_offset(pd);
uint32_t incr = AMDGPU_VM_PTE_COUNT * 8;
uint64_t last_pde = ~0, last_pt = ~0;
@@ -648,10 +640,15 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device 
*adev,
continue;
 
pt = amdgpu_bo_gpu_offset(bo);
-   if (vm->page_tables[pt_idx].addr == pt)
-   continue;
-   vm->page_tables[pt_idx].addr = pt;
-   vm->page_tables[pt_idx].addr_shadow = pt;
+   if (!shadow) {
+   if (vm->page_tables[pt_idx].addr == pt)
+   continue;
+   vm->page_tables[pt_idx].addr = pt;
+   } else {
+   if (vm->page_tables[pt_idx].addr_shadow == pt)
+   continue;
+   vm->page_tables[pt_idx].addr_shadow = pt;
+   }
 
pde = pd_addr + pt_idx * 8;
if (((last_pde + 8 * count) != pde) ||
@@ -704,6 +701,29 @@ error_free:
 }
 
 /**
+ * amdgpu_vm_update_pdes - make sure that page directory is valid
+ *
+ * @adev: amdgpu_device pointer
+ * @vm: requested vm
+ * @start: start of GPU address range
+ * @end: end of GPU address range
+ *
+ * Allocates new page tables if necessary
+ * and updates the page directory.
+ * Returns 0 for success, error for failure.
+ */
+int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
+   struct amdgpu_vm *vm)
+{
+   int r;
+
+   r = amdgpu_vm_update_pd_or_shadow(adev, vm, true);
+   if (r)
+   return r;
+   return amdgpu_vm_update_pd_or_shadow(adev, vm, false);
+}
+
+/**
  * amdgpu_vm_frag_ptes - add fragment information to PTEs
  *
  * @adev: amdgpu_device pointer
@@ -1573,6 +1593,12 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct 
amdgpu_vm *vm)
goto error_free_page_directory;
vm->last_eviction_counter = atomic64_read(&adev->num_evictions);
 
+   vm->pd_entry_shadow.robj = vm->page_directory->shadow;
+   vm->pd_entry_shadow.priority = 0;
+   vm->pd_entry_shadow.tv.bo = &vm->page_directory->shadow->tbo;
+   vm->pd_entry_shadow.tv.shared = true;
+   vm->pd_entry_shadow.user_pages = NULL;
+
return 0;
 
 error_free_p

[PATCH 10/13] drm/amdgpu: recover page tables after gpu reset

2016-07-28 Thread Chunming Zhou
Change-Id: I963598ba6eb44bc8620d70e026c0175d1a1de120
Signed-off-by: Chunming Zhou 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 28 +++-
 1 file changed, 27 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index de782ae..7e63ef9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -2146,18 +2146,44 @@ retry:
amdgpu_atombios_scratch_regs_restore(adev);
}
if (!r) {
+   struct amdgpu_ring *buffer_ring = adev->mman.buffer_funcs_ring;
+
amdgpu_irq_gpu_reset_resume_helper(adev);
r = amdgpu_ib_ring_tests(adev);
if (r) {
dev_err(adev->dev, "ib ring test failed (%d).\n", r);
r = amdgpu_suspend(adev);
+   need_full_reset = true;
goto retry;
}
-
+   /**
+* recovery vm page tables, since we cannot depend on VRAM is 
no problem
+* after gpu full reset.
+*/
+   if (need_full_reset && !(adev->flags & AMD_IS_APU)) {
+   struct amdgpu_vm *vm, *tmp;
+
+   DRM_INFO("recover page table from shadow\n");
+   amd_sched_rq_block_entity(
+   
&buffer_ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL], true);
+   kthread_unpark(buffer_ring->sched.thread);
+   spin_lock(&adev->vm_list_lock);
+   list_for_each_entry_safe(vm, tmp, &adev->vm_list, list) 
{
+   spin_unlock(&adev->vm_list_lock);
+   amdgpu_vm_recover_page_table_from_shadow(adev, 
vm);
+   spin_lock(&adev->vm_list_lock);
+   }
+   spin_unlock(&adev->vm_list_lock);
+   amd_sched_rq_block_entity(
+   
&buffer_ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL], false);
+   }
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
struct amdgpu_ring *ring = adev->rings[i];
if (!ring)
continue;
+
+   DRM_INFO("ring:%d recover jobs\n", ring->idx);
+   kthread_park(buffer_ring->sched.thread);
amd_sched_job_recovery(&ring->sched);
kthread_unpark(ring->sched.thread);
}
-- 
1.9.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH 03/13] drm/amdgpu: set shadow flag for pd/pt bo

2016-07-28 Thread Chunming Zhou
The pd/pt shadow bo will be used to back up the page table; when a gpu reset
happens, we can restore the page table from it.

Change-Id: I31eeb581f203d1db0654a48745ef4e64ed40ed9b
Signed-off-by: Chunming Zhou 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu.h|  2 ++
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 16 +---
 2 files changed, 15 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 7b4a0cf..4b3c6d2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -869,6 +869,8 @@ struct amdgpu_ring {
 struct amdgpu_vm_pt {
struct amdgpu_bo_list_entry entry;
uint64_taddr;
+   struct amdgpu_bo_list_entry entry_shadow;
+   uint64_taddr_shadow;
 };
 
 struct amdgpu_vm {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 38c80ea..aedd1cb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -1331,9 +1331,10 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
/* walk over the address space and allocate the page tables */
for (pt_idx = saddr; pt_idx <= eaddr; ++pt_idx) {
struct reservation_object *resv = vm->page_directory->tbo.resv;
-   struct amdgpu_bo_list_entry *entry;
+   struct amdgpu_bo_list_entry *entry, *entry_shadow;
struct amdgpu_bo *pt;
 
+   entry_shadow = &vm->page_tables[pt_idx].entry_shadow;
entry = &vm->page_tables[pt_idx].entry;
if (entry->robj)
continue;
@@ -1341,7 +1342,8 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
r = amdgpu_bo_create(adev, AMDGPU_VM_PTE_COUNT * 8,
 AMDGPU_GPU_PAGE_SIZE, true,
 AMDGPU_GEM_DOMAIN_VRAM,
-AMDGPU_GEM_CREATE_NO_CPU_ACCESS,
+AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
+AMDGPU_GEM_CREATE_SHADOW,
 NULL, resv, &pt);
if (r)
goto error_free;
@@ -1363,6 +1365,13 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
entry->tv.shared = true;
entry->user_pages = NULL;
vm->page_tables[pt_idx].addr = 0;
+
+   entry_shadow->robj = pt->shadow;
+   entry_shadow->priority = 0;
+   entry_shadow->tv.bo = &entry_shadow->robj->tbo;
+   entry_shadow->tv.shared = true;
+   entry_shadow->user_pages = NULL;
+   vm->page_tables[pt_idx].addr_shadow = 0;
}
 
return 0;
@@ -1540,7 +1549,8 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct 
amdgpu_vm *vm)
 
r = amdgpu_bo_create(adev, pd_size, align, true,
 AMDGPU_GEM_DOMAIN_VRAM,
-AMDGPU_GEM_CREATE_NO_CPU_ACCESS,
+AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
+AMDGPU_GEM_CREATE_SHADOW,
 NULL, NULL, &vm->page_directory);
if (r)
goto error_free_sched_entity;
-- 
1.9.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH 11/13] drm/amd: wait necessary dependency before running job

2016-07-28 Thread Chunming Zhou
Change-Id: Ibcc3558c2330caad1a2edb9902b3f21bd950d19f
Signed-off-by: Chunming Zhou 
---
 drivers/gpu/drm/amd/scheduler/gpu_scheduler.c | 6 +-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c 
b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
index 71b5f1a..a15fd88 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
@@ -436,9 +436,13 @@ void amd_sched_job_recovery(struct amd_gpu_scheduler 
*sched)
 
list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
struct amd_sched_fence *s_fence = s_job->s_fence;
-   struct fence *fence;
+   struct fence *fence, *dependency;
 
spin_unlock(&sched->job_list_lock);
+   while ((dependency = sched->ops->dependency(s_job))) {
+  fence_wait(dependency, false);
+  fence_put(dependency);
+   }
fence = sched->ops->run_job(s_job);
atomic_inc(&sched->hw_rq_count);
if (fence) {
-- 
1.9.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH 13/13] drm/amdgpu: add backup condition for shadow page table

2016-07-28 Thread Chunming Zhou
Change-Id: I5a8c0f4c1e9b65d2310ccb0f669b478884072a11
Signed-off-by: Chunming Zhou 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 71 +++---
 1 file changed, 48 insertions(+), 23 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 1305dc1..0e3f116 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -112,6 +112,14 @@ void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
list_add(&entry->tv.head, validated);
 }
 
+static bool amdgpu_vm_need_backup(struct amdgpu_device *adev)
+{
+   if (adev->flags & AMD_IS_APU)
+   return false;
+
+   return amdgpu_lockup_timeout > 0 ? true : false;
+}
+
 /**
  * amdgpu_vm_get_bos - add the vm BOs to a duplicates list
  *
@@ -140,13 +148,18 @@ void amdgpu_vm_get_pt_bos(struct amdgpu_device *adev, 
struct amdgpu_vm *vm,
struct amdgpu_bo_list_entry *entry = &vm->page_tables[i].entry;
struct amdgpu_bo_list_entry *entry_shadow = 
&vm->page_tables[i].entry_shadow;
 
-   if (!entry->robj || !entry_shadow->robj)
+   if (!entry->robj)
+   continue;
+
+   if (amdgpu_vm_need_backup(adev) && !entry_shadow->robj)
continue;
 
list_add(&entry->tv.head, duplicates);
-   list_add(&entry_shadow->tv.head, duplicates);
+   if (amdgpu_vm_need_backup(adev))
+   list_add(&entry_shadow->tv.head, duplicates);
}
-   list_add(&vm->pd_entry_shadow.tv.head, duplicates);
+   if (amdgpu_vm_need_backup(adev))
+   list_add(&vm->pd_entry_shadow.tv.head, duplicates);
 }
 
 /**
@@ -747,6 +760,8 @@ int amdgpu_vm_recover_page_table_from_shadow(struct 
amdgpu_device *adev,
uint64_t pt_idx;
int r;
 
+   if (!amdgpu_vm_need_backup(adev))
+   return 0;
	/* bo and shadow use same resv, so reserve one time */
r = amdgpu_bo_reserve(vm->page_directory, false);
if (unlikely(r != 0))
@@ -804,9 +819,12 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device 
*adev,
 {
int r;
 
-   r = amdgpu_vm_update_pd_or_shadow(adev, vm, true);
-   if (r)
-   return r;
+   if (amdgpu_vm_need_backup(adev)) {
+   r = amdgpu_vm_update_pd_or_shadow(adev, vm, true);
+   if (r)
+   return r;
+   }
+
return amdgpu_vm_update_pd_or_shadow(adev, vm, false);
 }
 
@@ -1072,10 +1090,11 @@ static int amdgpu_vm_bo_update_mapping(struct 
amdgpu_device *adev,
r = reservation_object_reserve_shared(vm->page_directory->tbo.resv);
if (r)
goto error_free;
-   /* update shadow pt bo */
-   amdgpu_vm_update_ptes(adev, &vm_update_params, vm, start,
- last + 1, addr, flags, true);
-
+   if (amdgpu_vm_need_backup(adev)) {
+   /* update shadow pt bo */
+   amdgpu_vm_update_ptes(adev, &vm_update_params, vm, start,
+ last + 1, addr, flags, true);
+   }
amdgpu_vm_update_ptes(adev, &vm_update_params, vm, start,
  last + 1, addr, flags, false);
 
@@ -1458,7 +1477,8 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
 AMDGPU_GPU_PAGE_SIZE, true,
 AMDGPU_GEM_DOMAIN_VRAM,
 AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
-AMDGPU_GEM_CREATE_SHADOW,
+(amdgpu_vm_need_backup(adev) ?
+ AMDGPU_GEM_CREATE_SHADOW : 0),
 NULL, resv, &pt);
if (r)
goto error_free;
@@ -1481,12 +1501,14 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
entry->user_pages = NULL;
vm->page_tables[pt_idx].addr = 0;
 
-   entry_shadow->robj = pt->shadow;
-   entry_shadow->priority = 0;
-   entry_shadow->tv.bo = &entry_shadow->robj->tbo;
-   entry_shadow->tv.shared = true;
-   entry_shadow->user_pages = NULL;
-   vm->page_tables[pt_idx].addr_shadow = 0;
+   if (amdgpu_vm_need_backup(adev)) {
+   entry_shadow->robj = pt->shadow;
+   entry_shadow->priority = 0;
+   entry_shadow->tv.bo = &entry_shadow->robj->tbo;
+   entry_shadow->tv.shared = true;
+   entry_shadow->user_pages = NULL;
+   vm->page_tables[pt_idx].addr_shadow = 0;
+   }
}
 
return 0;
@@ -1667,7 +1689,8 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct 
amdgpu_vm *vm)
r = amdgpu_bo_create(adev, pd_size, align, true,
   

[PATCH 07/13] drm/amdgpu: link all vm clients

2016-07-28 Thread Chunming Zhou
Add the vm client to the list tail when creating it; move it to the head when
submitting to the scheduler.

Change-Id: I0625092f918853303a5ee97ea2eac87fb790ed69
Signed-off-by: Chunming Zhou 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu.h| 6 ++
 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 4 
 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 2 ++
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 3 +++
 4 files changed, 15 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 1f941c4a..7e93d34 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -889,6 +889,9 @@ struct amdgpu_vm {
/* BO mappings freed, but not yet updated in the PT */
struct list_headfreed;
 
+   /* vm itself list */
+   struct list_headlist;
+
/* contains the page directory */
struct amdgpu_bo*page_directory;
struct amdgpu_bo_list_entry pd_entry_shadow;
@@ -2160,6 +2163,9 @@ struct amdgpu_device {
struct kfd_dev  *kfd;
 
struct amdgpu_virtualization virtualization;
+
+   /* link all vm clients */
+   struct list_headvm_list;
 };
 
 bool amdgpu_device_is_px(struct drm_device *dev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 45d5227..d19838b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -835,7 +835,10 @@ static int amdgpu_cs_dependencies(struct amdgpu_device 
*adev,
 static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
union drm_amdgpu_cs *cs)
 {
+   struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
+   struct amdgpu_vm *vm = &fpriv->vm;
struct amdgpu_ring *ring = p->job->ring;
+   struct amdgpu_device *adev = ring->adev;
struct amd_sched_entity *entity = &p->ctx->rings[ring->idx].entity;
struct amdgpu_job *job;
int r;
@@ -858,6 +861,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
 
trace_amdgpu_cs_ioctl(job);
amd_sched_entity_push_job(&job->base);
+   list_move(&vm->list, &adev->vm_list);
 
return 0;
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 449ea00..877afb5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -1559,6 +1559,8 @@ int amdgpu_device_init(struct amdgpu_device *adev,
spin_lock_init(&adev->gc_cac_idx_lock);
spin_lock_init(&adev->audio_endpt_idx_lock);
 
+   INIT_LIST_HEAD(&adev->vm_list);
+
adev->rmmio_base = pci_resource_start(adev->pdev, 5);
adev->rmmio_size = pci_resource_len(adev->pdev, 5);
adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 4f95dc4..35d939b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -1628,6 +1628,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct 
amdgpu_vm *vm)
INIT_LIST_HEAD(&vm->invalidated);
INIT_LIST_HEAD(&vm->cleared);
INIT_LIST_HEAD(&vm->freed);
+   INIT_LIST_HEAD(&vm->list);
 
pd_size = amdgpu_vm_directory_size(adev);
pd_entries = amdgpu_vm_num_pdes(adev);
@@ -1675,6 +1676,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct 
amdgpu_vm *vm)
vm->pd_entry_shadow.tv.bo = &vm->page_directory->shadow->tbo;
vm->pd_entry_shadow.tv.shared = true;
vm->pd_entry_shadow.user_pages = NULL;
+   list_add_tail(&vm->list, &adev->vm_list);
 
return 0;
 
@@ -1702,6 +1704,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct 
amdgpu_vm *vm)
struct amdgpu_bo_va_mapping *mapping, *tmp;
int i;
 
+   list_del(&vm->list);
amd_sched_entity_fini(vm->entity.sched, &vm->entity);
 
if (!RB_EMPTY_ROOT(&vm->va)) {
-- 
1.9.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH 12/13] drm/amdgpu: add vm recover pt fence

2016-07-28 Thread Chunming Zhou
Before every job runs, we must make sure its vm is recovered completely.

Change-Id: Ibe77a3c8f8206def280543fbb4195ad2ab9772e0
Signed-off-by: Chunming Zhou 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu.h |  2 ++
 drivers/gpu/drm/amd/amdgpu/amdgpu_job.c |  5 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c  | 24 ++--
 3 files changed, 25 insertions(+), 6 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 357d56a6..43beefb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -913,6 +913,8 @@ struct amdgpu_vm {
 
/* client id */
u64 client_id;
+
+   struct fence*recover_pt_fence;
 };
 
 struct amdgpu_vm_id {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index 6674d40..8d87a9a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -152,6 +152,10 @@ static struct fence *amdgpu_job_dependency(struct 
amd_sched_job *sched_job)
fence = amdgpu_sync_get_fence(&job->sync);
}
 
+   if (fence == NULL && vm && vm->recover_pt_fence &&
+   !fence_is_signaled(vm->recover_pt_fence))
+   fence = fence_get(vm->recover_pt_fence);
+
return fence;
 }
 
@@ -170,6 +174,7 @@ static struct fence *amdgpu_job_run(struct amd_sched_job 
*sched_job)
BUG_ON(amdgpu_sync_peek_fence(&job->sync, NULL));
 
trace_amdgpu_sched_run_job(job);
+
r = amdgpu_ib_schedule(job->ring, job->num_ibs, job->ibs,
   job->sync.last_vm_update, job, &fence);
if (r)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 1cb2e71..1305dc1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -703,11 +703,11 @@ error_free:
 static int amdgpu_vm_recover_bo_from_shadow(struct amdgpu_device *adev,
struct amdgpu_bo *bo,
struct amdgpu_bo *bo_shadow,
-   struct reservation_object *resv)
+   struct reservation_object *resv,
+   struct fence **fence)
 
 {
struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
-   struct fence *fence;
int r;
uint64_t vram_addr, gtt_addr;
 
@@ -727,9 +727,9 @@ static int amdgpu_vm_recover_bo_from_shadow(struct 
amdgpu_device *adev,
goto err3;
 
r = amdgpu_copy_buffer(ring, gtt_addr, vram_addr,
-  amdgpu_bo_size(bo), resv, &fence);
+  amdgpu_bo_size(bo), resv, fence);
if (!r)
-   amdgpu_bo_fence(bo, fence, true);
+   amdgpu_bo_fence(bo, *fence, true);
 
 err3:
amdgpu_bo_unpin(bo_shadow);
@@ -743,6 +743,7 @@ err1:
 int amdgpu_vm_recover_page_table_from_shadow(struct amdgpu_device *adev,
 struct amdgpu_vm *vm)
 {
+   struct fence *fence;
uint64_t pt_idx;
int r;
 
@@ -753,11 +754,14 @@ int amdgpu_vm_recover_page_table_from_shadow(struct 
amdgpu_device *adev,
 
r = amdgpu_vm_recover_bo_from_shadow(adev, vm->page_directory,
 vm->page_directory->shadow,
-NULL);
+NULL, &fence);
if (r) {
DRM_ERROR("recover page table failed!\n");
goto err;
}
+   fence_put(vm->recover_pt_fence);
+   vm->recover_pt_fence = fence_get(fence);
+   fence_put(fence);
 
for (pt_idx = 0; pt_idx <= vm->max_pde_used; ++pt_idx) {
struct amdgpu_bo *bo = vm->page_tables[pt_idx].entry.robj;
@@ -766,15 +770,21 @@ int amdgpu_vm_recover_page_table_from_shadow(struct 
amdgpu_device *adev,
if (!bo || !bo_shadow)
continue;
r = amdgpu_vm_recover_bo_from_shadow(adev, bo, bo_shadow,
-NULL);
+NULL, &fence);
if (r) {
DRM_ERROR("recover page table failed!\n");
goto err;
}
+   fence_put(vm->recover_pt_fence);
+   vm->recover_pt_fence = fence_get(fence);
+   fence_put(fence);
}
 
 err:
amdgpu_bo_unreserve(vm->page_directory);
+   if (vm->recover_pt_fence)
+   r = fence_wait(vm->recover_pt_fence, false);
+
return r;
 }
 /**
@@ -1629,6 +1639,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct 
amdgpu_vm *vm)
INIT_LIST_HEAD(&vm->cleared);
INIT_LIST_HEAD(&vm->freed);
  

[PATCH 06/13] drm/amdgpu: implement amdgpu_vm_recover_page_table_from_shadow

2016-07-28 Thread Chunming Zhou
Change-Id: I9957e726576289448911f5fb2ff7bcb9311a1906
Signed-off-by: Chunming Zhou 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu.h|  2 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 77 ++
 2 files changed, 79 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index a7951aa..1f941c4a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -1000,6 +1000,8 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
   uint64_t addr);
 void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
  struct amdgpu_bo_va *bo_va);
+int amdgpu_vm_recover_page_table_from_shadow(struct amdgpu_device *adev,
+struct amdgpu_vm *vm);
 
 /*
  * context related structures
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index fb8a7ab..4f95dc4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -700,6 +700,83 @@ error_free:
return r;
 }
 
+static int amdgpu_vm_recover_bo_from_shadow(struct amdgpu_device *adev,
+   struct amdgpu_bo *bo,
+   struct amdgpu_bo *bo_shadow,
+   struct reservation_object *resv)
+
+{
+   struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
+   struct fence *fence;
+   int r;
+   uint64_t vram_addr, gtt_addr;
+
+   r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_VRAM, &vram_addr);
+   if (r) {
+   DRM_ERROR("Failed to pin bo object\n");
+   goto err1;
+   }
+   r = amdgpu_bo_pin(bo_shadow, AMDGPU_GEM_DOMAIN_GTT, &gtt_addr);
+   if (r) {
+   DRM_ERROR("Failed to pin bo shadow object\n");
+   goto err2;
+   }
+
+   r = reservation_object_reserve_shared(bo->tbo.resv);
+   if (r)
+   goto err3;
+
+   r = amdgpu_copy_buffer(ring, gtt_addr, vram_addr,
+  amdgpu_bo_size(bo), resv, &fence);
+   if (!r)
+   amdgpu_bo_fence(bo, fence, true);
+
+err3:
+   amdgpu_bo_unpin(bo_shadow);
+err2:
+   amdgpu_bo_unpin(bo);
+err1:
+
+   return r;
+}
+
+int amdgpu_vm_recover_page_table_from_shadow(struct amdgpu_device *adev,
+struct amdgpu_vm *vm)
+{
+   uint64_t pt_idx;
+   int r;
+
+   /* bo and shadow use same resv, so reserve one time */
+   r = amdgpu_bo_reserve(vm->page_directory, false);
+   if (unlikely(r != 0))
+   return r;
+
+   r = amdgpu_vm_recover_bo_from_shadow(adev, vm->page_directory,
+vm->page_directory->shadow,
+NULL);
+   if (r) {
+   DRM_ERROR("recover page table failed!\n");
+   goto err;
+   }
+
+   for (pt_idx = 0; pt_idx <= vm->max_pde_used; ++pt_idx) {
+   struct amdgpu_bo *bo = vm->page_tables[pt_idx].entry.robj;
+   struct amdgpu_bo *bo_shadow = 
vm->page_tables[pt_idx].entry_shadow.robj;
+
+   if (!bo || !bo_shadow)
+   continue;
+   r = amdgpu_vm_recover_bo_from_shadow(adev, bo, bo_shadow,
+NULL);
+   if (r) {
+   DRM_ERROR("recover page table failed!\n");
+   goto err;
+   }
+   }
+
+err:
+   amdgpu_bo_unreserve(vm->page_directory);
+   return r;
+}
 /**
  * amdgpu_vm_update_pdes - make sure that page directory is valid
  *
-- 
1.9.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH 08/13] drm/amdgpu: add vm_list_lock

2016-07-28 Thread Chunming Zhou
To lock adev->vm_list.

Change-Id: I74d309eca9c22d190dd4072c69d26fa7fdea8884
Signed-off-by: Chunming Zhou 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu.h| 1 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 2 ++
 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 1 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 4 
 4 files changed, 8 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 7e93d34..357d56a6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -2166,6 +2166,7 @@ struct amdgpu_device {
 
/* link all vm clients */
struct list_headvm_list;
+   spinlock_t  vm_list_lock;
 };
 
 bool amdgpu_device_is_px(struct drm_device *dev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index d19838b..29c10f4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -861,7 +861,9 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
 
trace_amdgpu_cs_ioctl(job);
amd_sched_entity_push_job(&job->base);
+   spin_lock(&adev->vm_list_lock);
list_move(&vm->list, &adev->vm_list);
+   spin_unlock(&adev->vm_list_lock);
 
return 0;
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 877afb5..de782ae 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -1560,6 +1560,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
spin_lock_init(&adev->audio_endpt_idx_lock);
 
INIT_LIST_HEAD(&adev->vm_list);
+   spin_lock_init(&adev->vm_list_lock);
 
adev->rmmio_base = pci_resource_start(adev->pdev, 5);
adev->rmmio_size = pci_resource_len(adev->pdev, 5);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 35d939b..1cb2e71 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -1676,7 +1676,9 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct 
amdgpu_vm *vm)
vm->pd_entry_shadow.tv.bo = &vm->page_directory->shadow->tbo;
vm->pd_entry_shadow.tv.shared = true;
vm->pd_entry_shadow.user_pages = NULL;
+   spin_lock(&adev->vm_list_lock);
list_add_tail(&vm->list, &adev->vm_list);
+   spin_unlock(&adev->vm_list_lock);
 
return 0;
 
@@ -1704,7 +1706,9 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct 
amdgpu_vm *vm)
struct amdgpu_bo_va_mapping *mapping, *tmp;
int i;
 
+   spin_lock(&adev->vm_list_lock);
list_del(&vm->list);
+   spin_unlock(&adev->vm_list_lock);
amd_sched_entity_fini(vm->entity.sched, &vm->entity);
 
if (!RB_EMPTY_ROOT(&vm->va)) {
-- 
1.9.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH 02/11] drm/amdgpu: specify entity to amdgpu_copy_buffer

2016-07-28 Thread Chunming Zhou
Change-Id: Ib84621d8ab61bf2ca0719c6888cc403982127684
Signed-off-by: Chunming Zhou 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu.h   | 1 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c | 3 ++-
 drivers/gpu/drm/amd/amdgpu/amdgpu_test.c  | 8 
 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c   | 5 +++--
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c| 2 +-
 5 files changed, 11 insertions(+), 8 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index ebd5565..9f7fae0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -431,6 +431,7 @@ struct amdgpu_mman {
 };
 
 int amdgpu_copy_buffer(struct amdgpu_ring *ring,
+  struct amd_sched_entity *entity,
   uint64_t src_offset,
   uint64_t dst_offset,
   uint32_t byte_count,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
index 33e47a4..cab93c7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
@@ -39,7 +39,8 @@ static int amdgpu_benchmark_do_move(struct amdgpu_device 
*adev, unsigned size,
start_jiffies = jiffies;
for (i = 0; i < n; i++) {
struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
-   r = amdgpu_copy_buffer(ring, saddr, daddr, size, NULL, &fence);
+   r = amdgpu_copy_buffer(ring, &adev->mman.entity,
+  saddr, daddr, size, NULL, &fence);
if (r)
goto exit_do_move;
r = fence_wait(fence, false);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
index 05a53f4..bbaa1c1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
@@ -110,8 +110,8 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
 
amdgpu_bo_kunmap(gtt_obj[i]);
 
-   r = amdgpu_copy_buffer(ring, gtt_addr, vram_addr,
-  size, NULL, &fence);
+   r = amdgpu_copy_buffer(ring, &adev->mman.entity, gtt_addr,
+  vram_addr, size, NULL, &fence);
 
if (r) {
DRM_ERROR("Failed GTT->VRAM copy %d\n", i);
@@ -155,8 +155,8 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
 
amdgpu_bo_kunmap(vram_obj);
 
-   r = amdgpu_copy_buffer(ring, vram_addr, gtt_addr,
-  size, NULL, &fence);
+   r = amdgpu_copy_buffer(ring, &adev->mman.entity, vram_addr,
+  gtt_addr, size, NULL, &fence);
 
if (r) {
DRM_ERROR("Failed VRAM->GTT copy %d\n", i);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index b7742e6..757a71b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -283,7 +283,7 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
 
BUILD_BUG_ON((PAGE_SIZE % AMDGPU_GPU_PAGE_SIZE) != 0);
 
-   r = amdgpu_copy_buffer(ring, old_start, new_start,
+   r = amdgpu_copy_buffer(ring, &adev->mman.entity, old_start, new_start,
   new_mem->num_pages * PAGE_SIZE, /* bytes */
   bo->resv, &fence);
if (r)
@@ -1147,6 +1147,7 @@ int amdgpu_mmap(struct file *filp, struct vm_area_struct 
*vma)
 }
 
 int amdgpu_copy_buffer(struct amdgpu_ring *ring,
+  struct amd_sched_entity *entity,
   uint64_t src_offset,
   uint64_t dst_offset,
   uint32_t byte_count,
@@ -1195,7 +1196,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring,
 
amdgpu_ring_pad_ib(ring, &job->ibs[0]);
WARN_ON(job->ibs[0].length_dw > num_dw);
-   r = amdgpu_job_submit(job, ring, &adev->mman.entity,
+   r = amdgpu_job_submit(job, ring, entity,
  AMDGPU_FENCE_OWNER_UNDEFINED, fence);
if (r)
goto error_free;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 0e3f116..11c1263 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -739,7 +739,7 @@ static int amdgpu_vm_recover_bo_from_shadow(struct 
amdgpu_device *adev,
if (r)
goto err3;
 
-   r = amdgpu_copy_buffer(ring, gtt_addr, vram_addr,
+   r = amdgpu_copy_buffer(ring, &adev->mman.entity, gtt_addr, vram_addr,
   amdgpu_bo_size(bo), resv, fence);
if (!r)
amdgpu_bo_fence(bo, *fence, true);
-- 
1.9.1

___
amd-gfx mailing list
am

[PATCH 01/11] drm/amdgpu: hw ring should be empty when gpu reset

2016-07-28 Thread Chunming Zhou
Change-Id: I08ca5a805f590cc7aad0e9ccd91bd5925bb216e2
Signed-off-by: Chunming Zhou 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu.h|  1 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c |  1 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c   | 11 +++
 3 files changed, 13 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 43beefb..ebd5565 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -1247,6 +1247,7 @@ int amdgpu_ib_ring_tests(struct amdgpu_device *adev);
 int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned ndw);
 void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count);
 void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib 
*ib);
+void amdgpu_ring_reset(struct amdgpu_ring *ring);
 void amdgpu_ring_commit(struct amdgpu_ring *ring);
 void amdgpu_ring_undo(struct amdgpu_ring *ring);
 int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 7e63ef9..1968251 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -2102,6 +2102,7 @@ int amdgpu_gpu_reset(struct amdgpu_device *adev)
continue;
kthread_park(ring->sched.thread);
amd_sched_hw_job_reset(&ring->sched);
+   amdgpu_ring_reset(ring);
}
/* after all hw jobs are reset, hw fence is meaningless, so 
force_completion */
amdgpu_fence_driver_force_completion(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index 9989e25..75e1da6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -110,6 +110,17 @@ void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, 
struct amdgpu_ib *ib)
ib->ptr[ib->length_dw++] = ring->nop;
 }
 
+void amdgpu_ring_reset(struct amdgpu_ring *ring)
+{
+   u32 rptr = amdgpu_ring_get_rptr(ring);
+
+   ring->wptr = rptr;
+   ring->wptr &= ring->ptr_mask;
+
+   mb();
+   amdgpu_ring_set_wptr(ring);
+}
+
 /**
  * amdgpu_ring_commit - tell the GPU to execute the new
  * commands on the ring buffer
-- 
1.9.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH 09/11] drm/amd: hw job list should be exact

2016-07-28 Thread Chunming Zhou
The hw job list should be exact, so deleting the job node should happen in the
irq handler instead of the work thread.
Calculating the time of the next job should be immediate as well.

Change-Id: I6d2686d84be3e7077300df7181c2a284fbcda9eb
Signed-off-by: Chunming Zhou 
---
 drivers/gpu/drm/amd/scheduler/gpu_scheduler.c | 38 +--
 1 file changed, 18 insertions(+), 20 deletions(-)

diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c 
b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
index 9f4fa6e..69a9d40 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
@@ -367,34 +367,32 @@ static void amd_sched_job_finish(struct work_struct *work)
struct amd_sched_job *s_job = container_of(work, struct amd_sched_job,
   finish_work);
struct amd_gpu_scheduler *sched = s_job->sched;
-   unsigned long flags;
-
-   /* remove job from ring_mirror_list */
-   spin_lock_irqsave(&sched->job_list_lock, flags);
-   list_del_init(&s_job->node);
-   if (sched->timeout != MAX_SCHEDULE_TIMEOUT) {
-   struct amd_sched_job *next;
 
-   spin_unlock_irqrestore(&sched->job_list_lock, flags);
+   if (sched->timeout != MAX_SCHEDULE_TIMEOUT)
cancel_delayed_work_sync(&s_job->work_tdr);
-   spin_lock_irqsave(&sched->job_list_lock, flags);
-
-   /* queue TDR for next job */
-   next = list_first_entry_or_null(&sched->ring_mirror_list,
-   struct amd_sched_job, node);
 
-   if (next)
-   schedule_delayed_work(&next->work_tdr, sched->timeout);
-   }
-   spin_unlock_irqrestore(&sched->job_list_lock, flags);
sched->ops->free_job(s_job);
 }
 
 static void amd_sched_job_finish_cb(struct fence *f, struct fence_cb *cb)
 {
-   struct amd_sched_job *job = container_of(cb, struct amd_sched_job,
-finish_cb);
-   schedule_work(&job->finish_work);
+   struct amd_sched_job *s_job = container_of(cb, struct amd_sched_job,
+  finish_cb);
+   struct amd_gpu_scheduler *sched = s_job->sched;
+   struct amd_sched_job *next;
+   unsigned long flags;
+
+   /* remove job from ring_mirror_list */
+   spin_lock_irqsave(&sched->job_list_lock, flags);
+   list_del_init(&s_job->node);
+   /* queue TDR for next job */
+   next = list_first_entry_or_null(&sched->ring_mirror_list,
+   struct amd_sched_job, node);
+   spin_unlock_irqrestore(&sched->job_list_lock, flags);
+   if (next)
+   schedule_delayed_work(&next->work_tdr, sched->timeout);
+
+   schedule_work(&s_job->finish_work);
 }
 
 static void amd_sched_job_begin(struct amd_sched_job *s_job)
-- 
1.9.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH 07/11] drm/amd: add recover entity for every scheduler

2016-07-28 Thread Chunming Zhou
It will be used to recover hw jobs.

Change-Id: I5508f5ffa04909b480ddd669dfb297e5059eba04
Signed-off-by: Chunming Zhou 
---
 drivers/gpu/drm/amd/scheduler/gpu_scheduler.c | 24 
 drivers/gpu/drm/amd/scheduler/gpu_scheduler.h |  1 +
 2 files changed, 21 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c 
b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
index a15fd88..36f5805 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
@@ -635,7 +635,7 @@ int amd_sched_init(struct amd_gpu_scheduler *sched,
   const struct amd_sched_backend_ops *ops,
   unsigned hw_submission, long timeout, const char *name)
 {
-   int i;
+   int i, r;
sched->ops = ops;
sched->hw_submission_limit = hw_submission;
sched->name = name;
@@ -648,22 +648,37 @@ int amd_sched_init(struct amd_gpu_scheduler *sched,
INIT_LIST_HEAD(&sched->ring_mirror_list);
spin_lock_init(&sched->job_list_lock);
atomic_set(&sched->hw_rq_count, 0);
+   r = amd_sched_entity_init(sched, &sched->recover_entity,
+ &sched->sched_rq[AMD_SCHED_PRIORITY_RECOVER],
+ hw_submission);
+   if (r)
+   return r;
if (atomic_inc_return(&sched_fence_slab_ref) == 1) {
sched_fence_slab = kmem_cache_create(
"amd_sched_fence", sizeof(struct amd_sched_fence), 0,
SLAB_HWCACHE_ALIGN, NULL);
-   if (!sched_fence_slab)
-   return -ENOMEM;
+   if (!sched_fence_slab) {
+   r = -ENOMEM;
+   goto err1;
+   }
}
 
	/* Each scheduler will run on a separate kernel thread */
sched->thread = kthread_run(amd_sched_main, sched, sched->name);
if (IS_ERR(sched->thread)) {
DRM_ERROR("Failed to create scheduler for %s.\n", name);
-   return PTR_ERR(sched->thread);
+   r = PTR_ERR(sched->thread);
+   goto err2;
}
 
return 0;
+err2:
+   if (atomic_dec_and_test(&sched_fence_slab_ref))
+   kmem_cache_destroy(sched_fence_slab);
+
+err1:
+   amd_sched_entity_fini(sched, &sched->recover_entity);
+   return r;
 }
 
 /**
@@ -677,4 +692,5 @@ void amd_sched_fini(struct amd_gpu_scheduler *sched)
kthread_stop(sched->thread);
if (atomic_dec_and_test(&sched_fence_slab_ref))
kmem_cache_destroy(sched_fence_slab);
+   amd_sched_entity_fini(sched, &sched->recover_entity);
 }
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h 
b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
index cd87bc7..8245316 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
@@ -133,6 +133,7 @@ struct amd_gpu_scheduler {
struct task_struct  *thread;
struct list_headring_mirror_list;
spinlock_t  job_list_lock;
+   struct amd_sched_entity recover_entity;
 };
 
 int amd_sched_init(struct amd_gpu_scheduler *sched,
-- 
1.9.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH 08/11] drm/amd: use scheduler to recover hw jobs

2016-07-28 Thread Chunming Zhou
The old way is trying to recover hw jobs directly, which will conflict
with scheduler thread.

Change-Id: I9e45abd43ae280a675b0b0d88a820106dea2716c
Signed-off-by: Chunming Zhou 
---
 drivers/gpu/drm/amd/scheduler/gpu_scheduler.c | 48 +--
 1 file changed, 16 insertions(+), 32 deletions(-)

diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c 
b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
index 36f5805..9f4fa6e 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
@@ -324,10 +324,12 @@ amd_sched_entity_pop_job(struct amd_sched_entity *entity)
  *
  * Returns true if we could submit the job.
  */
-static bool amd_sched_entity_in(struct amd_sched_job *sched_job)
+static bool amd_sched_entity_in_or_recover(struct amd_sched_job *sched_job,
+  bool recover)
 {
struct amd_gpu_scheduler *sched = sched_job->sched;
-   struct amd_sched_entity *entity = sched_job->s_entity;
+   struct amd_sched_entity *entity = recover ? &sched->recover_entity :
+   sched_job->s_entity;
bool added, first = false;
 
spin_lock(&entity->queue_lock);
@@ -348,6 +350,15 @@ static bool amd_sched_entity_in(struct amd_sched_job 
*sched_job)
return added;
 }
 
+static void amd_sched_entity_push_job_recover(struct amd_sched_job *sched_job)
+{
+   struct amd_sched_entity *entity = sched_job->s_entity;
+
+   trace_amd_sched_job(sched_job);
+   wait_event(entity->sched->job_scheduled,
+  amd_sched_entity_in_or_recover(sched_job, true));
+}
+
 /* job_finish is called after hw fence signaled, and
  * the job had already been deleted from ring_mirror_list
  */
@@ -426,39 +437,12 @@ void amd_sched_hw_job_reset(struct amd_gpu_scheduler 
*sched)
 void amd_sched_job_recovery(struct amd_gpu_scheduler *sched)
 {
struct amd_sched_job *s_job, *tmp;
-   int r;
 
spin_lock(&sched->job_list_lock);
-   s_job = list_first_entry_or_null(&sched->ring_mirror_list,
-struct amd_sched_job, node);
-   if (s_job)
-   schedule_delayed_work(&s_job->work_tdr, sched->timeout);
-
list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
-   struct amd_sched_fence *s_fence = s_job->s_fence;
-   struct fence *fence, *dependency;
-
+   list_del_init(&s_job->node);
spin_unlock(&sched->job_list_lock);
-   while ((dependency = sched->ops->dependency(s_job))) {
-  fence_wait(dependency, false);
-  fence_put(dependency);
-   }
-   fence = sched->ops->run_job(s_job);
-   atomic_inc(&sched->hw_rq_count);
-   if (fence) {
-   s_fence->parent = fence_get(fence);
-   r = fence_add_callback(fence, &s_fence->cb,
-  amd_sched_process_job);
-   if (r == -ENOENT)
-   amd_sched_process_job(fence, &s_fence->cb);
-   else if (r)
-   DRM_ERROR("fence add callback failed (%d)\n",
- r);
-   fence_put(fence);
-   } else {
-   DRM_ERROR("Failed to run job!\n");
-   amd_sched_process_job(NULL, &s_fence->cb);
-   }
+   amd_sched_entity_push_job_recover(s_job);
spin_lock(&sched->job_list_lock);
}
spin_unlock(&sched->job_list_lock);
@@ -479,7 +463,7 @@ void amd_sched_entity_push_job(struct amd_sched_job 
*sched_job)
fence_add_callback(&sched_job->s_fence->finished, &sched_job->finish_cb,
   amd_sched_job_finish_cb);
wait_event(entity->sched->job_scheduled,
-  amd_sched_entity_in(sched_job));
+  amd_sched_entity_in_or_recover(sched_job, false));
 }
 
 /* init a sched_job with basic field */
-- 
1.9.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH 03/11] drm/amd: add recover run queue for scheduler

2016-07-28 Thread Chunming Zhou
Change-Id: I7171d1e3884aabe1263d8f7be18cadf2e98216a4
Signed-off-by: Chunming Zhou 
---
 drivers/gpu/drm/amd/scheduler/gpu_scheduler.h | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h 
b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
index a1c0073..cd87bc7 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
@@ -112,7 +112,8 @@ struct amd_sched_backend_ops {
 };
 
 enum amd_sched_priority {
-   AMD_SCHED_PRIORITY_KERNEL = 0,
+   AMD_SCHED_PRIORITY_RECOVER = 0,
+   AMD_SCHED_PRIORITY_KERNEL,
AMD_SCHED_PRIORITY_NORMAL,
AMD_SCHED_MAX_PRIORITY
 };
-- 
1.9.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH 10/11] drm/amd: reset jobs to recover entity

2016-07-28 Thread Chunming Zhou
remove recover_entity from the recover_rq when resetting jobs.
add recover_entity back when recovering jobs

Change-Id: Ic2e5cb6ab79d2abc49374e1770299487e327efe9
Signed-off-by: Chunming Zhou 
---
 drivers/gpu/drm/amd/scheduler/gpu_scheduler.c | 19 ++-
 1 file changed, 10 insertions(+), 9 deletions(-)

diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c 
b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
index 69a9d40..f832d0d 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
@@ -417,9 +417,10 @@ static void amd_sched_job_timedout(struct work_struct 
*work)
job->sched->ops->timedout_job(job);
 }
 
+/* scheduler must be parked before job reset */
 void amd_sched_hw_job_reset(struct amd_gpu_scheduler *sched)
 {
-   struct amd_sched_job *s_job;
+   struct amd_sched_job *s_job, *tmp;
 
spin_lock(&sched->job_list_lock);
list_for_each_entry_reverse(s_job, &sched->ring_mirror_list, node) {
@@ -429,14 +430,6 @@ void amd_sched_hw_job_reset(struct amd_gpu_scheduler 
*sched)
}
}
atomic_set(&sched->hw_rq_count, 0);
-   spin_unlock(&sched->job_list_lock);
-}
-
-void amd_sched_job_recovery(struct amd_gpu_scheduler *sched)
-{
-   struct amd_sched_job *s_job, *tmp;
-
-   spin_lock(&sched->job_list_lock);
list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
list_del_init(&s_job->node);
spin_unlock(&sched->job_list_lock);
@@ -444,6 +437,14 @@ void amd_sched_job_recovery(struct amd_gpu_scheduler 
*sched)
spin_lock(&sched->job_list_lock);
}
spin_unlock(&sched->job_list_lock);
+   amd_sched_rq_remove_entity(&sched->sched_rq[AMD_SCHED_PRIORITY_RECOVER],
+  &sched->recover_entity);
+}
+
+void amd_sched_job_recovery(struct amd_gpu_scheduler *sched)
+{
+   amd_sched_rq_add_entity(&sched->sched_rq[AMD_SCHED_PRIORITY_RECOVER],
+   &sched->recover_entity);
 }
 
 /**
-- 
1.9.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH 05/11] drm/amdgpu: add vm recover entity

2016-07-28 Thread Chunming Zhou
every vm uses its own recovery entity to recover its page table from the shadow.

Change-Id: I93e37666cb3fb511311c96ff172b6e9ebd337547
Signed-off-by: Chunming Zhou 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu.h|  3 ++-
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 21 ++---
 2 files changed, 16 insertions(+), 8 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 9f7fae0..98f631a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -911,7 +911,8 @@ struct amdgpu_vm {
 
/* Scheduler entity for page table updates */
struct amd_sched_entity entity;
-
+   struct amd_sched_entity recover_entity;
+   struct amdgpu_ring  *ring;
/* client id */
u64 client_id;
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 1d58577..6d2a28a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -714,13 +714,13 @@ error_free:
 }
 
 static int amdgpu_vm_recover_bo_from_shadow(struct amdgpu_device *adev,
+   struct amdgpu_vm *vm,
struct amdgpu_bo *bo,
struct amdgpu_bo *bo_shadow,
struct reservation_object *resv,
struct fence **fence)
 
 {
-   struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
int r;
uint64_t vram_addr, gtt_addr;
 
@@ -739,8 +739,8 @@ static int amdgpu_vm_recover_bo_from_shadow(struct 
amdgpu_device *adev,
if (r)
goto err3;
 
-   r = amdgpu_copy_buffer(ring, &adev->mman.entity, gtt_addr, vram_addr,
-  amdgpu_bo_size(bo), resv, fence);
+   r = amdgpu_copy_buffer(vm->ring, &vm->recover_entity, gtt_addr,
+  vram_addr, amdgpu_bo_size(bo), resv, fence);
if (!r)
amdgpu_bo_fence(bo, *fence, true);
 
@@ -767,7 +767,7 @@ int amdgpu_vm_recover_page_table_from_shadow(struct 
amdgpu_device *adev,
if (unlikely(r != 0))
return r;
 
-   r = amdgpu_vm_recover_bo_from_shadow(adev, vm->page_directory,
+   r = amdgpu_vm_recover_bo_from_shadow(adev, vm, vm->page_directory,
 vm->page_directory->shadow,
 NULL, &fence);
if (r) {
@@ -784,7 +784,7 @@ int amdgpu_vm_recover_page_table_from_shadow(struct 
amdgpu_device *adev,
 
if (!bo || !bo_shadow)
continue;
-   r = amdgpu_vm_recover_bo_from_shadow(adev, bo, bo_shadow,
+   r = amdgpu_vm_recover_bo_from_shadow(adev, vm, bo, bo_shadow,
 NULL, &fence);
if (r) {
DRM_ERROR("recover page table failed!\n");
@@ -1678,12 +1678,17 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct 
amdgpu_vm *vm)
ring_instance = atomic_inc_return(&adev->vm_manager.vm_pte_next_ring);
ring_instance %= adev->vm_manager.vm_pte_num_rings;
ring = adev->vm_manager.vm_pte_rings[ring_instance];
+   rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_RECOVER];
+   r = amd_sched_entity_init(&ring->sched, &vm->recover_entity,
+ rq, amdgpu_sched_jobs);
+   if (r)
+   goto err;
rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_KERNEL];
r = amd_sched_entity_init(&ring->sched, &vm->entity,
  rq, amdgpu_sched_jobs);
if (r)
-   goto err;
-
+   goto err1;
+   vm->ring = ring;
vm->page_directory_fence = NULL;
 
r = amdgpu_bo_create(adev, pd_size, align, true,
@@ -1725,6 +1730,8 @@ error_free_page_directory:
 error_free_sched_entity:
amd_sched_entity_fini(&ring->sched, &vm->entity);
 
+err1:
+   amd_sched_entity_fini(&ring->sched, &vm->recover_entity);
 err:
drm_free_large(vm->page_tables);
 
-- 
1.9.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH 00/11] add recovery entity

2016-07-28 Thread Chunming Zhou
every vm has its own recovery entity, which is used to recover its page table from
its shadow.
They don't need to wait for the preceding vm to complete.
Also, using all pte rings can speed up recovery.

every scheduler has its own recovery entity, which is used to save hw jobs and 
resubmit them; this solves the conflicts between the reset thread and the scheduler 
thread when running jobs.

And some fixes made while doing this improvement.

Chunming Zhou (11):
  drm/amdgpu: hw ring should be empty when gpu reset
  drm/amdgpu: specify entity to amdgpu_copy_buffer
  drm/amd: add recover run queue for scheduler
  drm/amdgpu: fix vm init error path
  drm/amdgpu: add vm recover entity
  drm/amdgpu: use all pte rings to recover page table
  drm/amd: add recover entity for every scheduler
  drm/amd: use scheduler to recover hw jobs
  drm/amd: hw job list should be exact
  drm/amd: reset jobs to recover entity
  drm/amdgpu: no need fence wait every time

 drivers/gpu/drm/amd/amdgpu/amdgpu.h   |   5 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c |   3 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c|  35 +--
 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c  |  11 +++
 drivers/gpu/drm/amd/amdgpu/amdgpu_test.c  |   8 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c   |   5 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c|  26 --
 drivers/gpu/drm/amd/scheduler/gpu_scheduler.c | 129 +-
 drivers/gpu/drm/amd/scheduler/gpu_scheduler.h |   4 +-
 9 files changed, 134 insertions(+), 92 deletions(-)

-- 
1.9.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH 06/11] drm/amdgpu: use all pte rings to recover page table

2016-07-28 Thread Chunming Zhou
Change-Id: Ic74508ec9de0bf1c027313ce9574e6cb8ea9bb1d
Signed-off-by: Chunming Zhou 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 34 ++
 1 file changed, 25 insertions(+), 9 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 1968251..e91177a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -2083,6 +2083,7 @@ int amdgpu_gpu_reset(struct amdgpu_device *adev)
int i, r;
int resched;
bool need_full_reset;
+   u32 unpark_bits;
 
if (!amdgpu_check_soft_reset(adev)) {
DRM_INFO("No hardware hang detected. Did some blocks stall?\n");
@@ -2104,6 +2105,7 @@ int amdgpu_gpu_reset(struct amdgpu_device *adev)
amd_sched_hw_job_reset(&ring->sched);
amdgpu_ring_reset(ring);
}
+   unpark_bits = 0;
/* after all hw jobs are reset, hw fence is meaningless, so 
force_completion */
amdgpu_fence_driver_force_completion(adev);
/* store modesetting */
@@ -2147,8 +2149,6 @@ retry:
amdgpu_atombios_scratch_regs_restore(adev);
}
if (!r) {
-   struct amdgpu_ring *buffer_ring = adev->mman.buffer_funcs_ring;
-
amdgpu_irq_gpu_reset_resume_helper(adev);
r = amdgpu_ib_ring_tests(adev);
if (r) {
@@ -2163,11 +2163,20 @@ retry:
 */
if (need_full_reset && !(adev->flags & AMD_IS_APU)) {
struct amdgpu_vm *vm, *tmp;
+   int i;
 
DRM_INFO("recover page table from shadow\n");
-   amd_sched_rq_block_entity(
-   
&buffer_ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL], true);
-   kthread_unpark(buffer_ring->sched.thread);
+   for (i = 0; i < adev->vm_manager.vm_pte_num_rings; i++) 
{
+   struct amdgpu_ring *ring = 
adev->vm_manager.vm_pte_rings[i];
+
+   amd_sched_rq_block_entity(
+   
&ring->sched.sched_rq[AMD_SCHED_PRIORITY_KERNEL], true);
+   amd_sched_rq_block_entity(
+   
&ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL], true);
+   kthread_unpark(ring->sched.thread);
+   unpark_bits |= 1 << ring->idx;
+   }
+
spin_lock(&adev->vm_list_lock);
list_for_each_entry_safe(vm, tmp, &adev->vm_list, list) 
{
spin_unlock(&adev->vm_list_lock);
@@ -2175,8 +2184,15 @@ retry:
spin_lock(&adev->vm_list_lock);
}
spin_unlock(&adev->vm_list_lock);
-   amd_sched_rq_block_entity(
-   
&buffer_ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL], false);
+
+   for (i = 0; i < adev->vm_manager.vm_pte_num_rings; i++) 
{
+   struct amdgpu_ring *ring = 
adev->vm_manager.vm_pte_rings[i];
+
+   amd_sched_rq_block_entity(
+   
&ring->sched.sched_rq[AMD_SCHED_PRIORITY_KERNEL], false);
+   amd_sched_rq_block_entity(
+   
&ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL], false);
+   }
}
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
struct amdgpu_ring *ring = adev->rings[i];
@@ -2184,9 +2200,9 @@ retry:
continue;
 
DRM_INFO("ring:%d recover jobs\n", ring->idx);
-   kthread_park(buffer_ring->sched.thread);
amd_sched_job_recovery(&ring->sched);
-   kthread_unpark(ring->sched.thread);
+   if (!((unpark_bits >> ring->idx) & 0x1))
+   kthread_unpark(ring->sched.thread);
}
} else {
dev_err(adev->dev, "asic resume failed (%d).\n", r);
-- 
1.9.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH 04/11] drm/amdgpu: fix vm init error path

2016-07-28 Thread Chunming Zhou
Change-Id: Ie3d5440dc0d2d3a61d8e785ab08b8b91eda223db
Signed-off-by: Chunming Zhou 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 5 -
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 11c1263..1d58577 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -1682,7 +1682,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct 
amdgpu_vm *vm)
r = amd_sched_entity_init(&ring->sched, &vm->entity,
  rq, amdgpu_sched_jobs);
if (r)
-   return r;
+   goto err;
 
vm->page_directory_fence = NULL;
 
@@ -1725,6 +1725,9 @@ error_free_page_directory:
 error_free_sched_entity:
amd_sched_entity_fini(&ring->sched, &vm->entity);
 
+err:
+   drm_free_large(vm->page_tables);
+
return r;
 }
 
-- 
1.9.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH 11/11] drm/amdgpu: no need fence wait every time

2016-07-28 Thread Chunming Zhou
recover entities already handle each dependency properly.

Change-Id: I70a8d0e2753741c4b54d9e01085d00dd708b5c80
Signed-off-by: Chunming Zhou 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 2 --
 1 file changed, 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 6d2a28a..b2790eb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -797,8 +797,6 @@ int amdgpu_vm_recover_page_table_from_shadow(struct 
amdgpu_device *adev,
 
 err:
amdgpu_bo_unreserve(vm->page_directory);
-   if (vm->recover_pt_fence)
-   r = fence_wait(vm->recover_pt_fence, false);
 
return r;
 }
-- 
1.9.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH 4/4] drm/amd/powerplay: Prevent UVD powerdown before init

2016-07-28 Thread StDenis, Tom
I found that moving the PG_EN to start would cause problems.  I'm pretty sure 
you have to write it just before calling the firmware otherwise bad things 
happen which is why I moved it there.  We could probably move it into the 
uvd_v6_0's powergating function which is called just before the smu call.


I'll take care of comments 2/3.  For 4 it was a sanity check to prevent 
out-of-order operations.  It can probably be safely removed now.


My work tree also has a half-implemented VCE PG/CG (PG works, CG doesn't) that 
has some of the same issues so I'll fix them too and sort the tree out a bit 
and re-send the whole lot sometime today.


Tom


From: Zhu, Rex
Sent: Thursday, July 28, 2016 03:43
To: Alex Deucher; Tom St Denis
Cc: StDenis, Tom; amd-gfx list
Subject: RE: [PATCH 4/4] drm/amd/powerplay: Prevent UVD powerdown before init


From: amd-gfx [mailto:amd-gfx-boun...@lists.freedesktop.org] On Behalf Of Alex 
Deucher
Sent: Thursday, July 28, 2016 1:46 PM
To: Tom St Denis
Cc: StDenis, Tom; amd-gfx list
Subject: Re: [PATCH 4/4] drm/amd/powerplay: Prevent UVD powerdown before init

On Tue, Jul 26, 2016 at 11:38 AM, Tom St Denis  wrote:
> Because of the ip_blocks init order powerplay would power down the UVD
> block before UVD start is called.  This results in a VCPU hang.
>
> This patch prevents power down before UVD is initialized.
>
> Also correct the power up order so clocking is set after power is
> ungated.
>
> With this applied comparable clock/power behaviour to powerplay=0 with
> DPM is observed.
>
> Signed-off-by: Tom St Denis 

This patch needs to be split into several patches and reworked a bit.
Also, don't include amdgpu.h in powerplay.  We have cgs for access to registers 
and data from adev, etc.  The idea is to minimize the dependencies between 
components.  We shouldn't be accessing adev directly in powerplay.  A couple 
more comments inline.


Rex:  I also think so.
1. We can move
+   WREG32(mmUVD_POWER_STATUS,
+   UVD_POWER_STATUS__UVD_PG_EN_MASK |
+   UVD_POWER_STATUS__UVD_PG_MODE_MASK);
+   else
+   WREG32(mmUVD_POWER_STATUS,
+   UVD_POWER_STATUS__UVD_PG_EN_MASK);
to uvd_v6_0_start.  no need to visit adev in powerplay and dpm.  And uvd test 
also can pass.

2.  for the lock, we can just use pm.mutex.

3.  please also delete enable_clock_power_gatings_tasks in resume_action_chain 
in a separate patch for powerplay.

4.  do we need to add cg_state, pg_state?



Best Regards
Rex


> ---
>  drivers/gpu/drm/amd/amdgpu/amdgpu.h|  6 ++
>  drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c|  5 -
>  drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c  |  8 ---
>  drivers/gpu/drm/amd/amdgpu/vi.c| 12 ---
>  .../drm/amd/powerplay/hwmgr/cz_clockpowergating.c  | 25 
> ++
>  drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c |  7 ++
>  6 files changed, 43 insertions(+), 20 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
> b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
> index d0460ea2f85b..5616b16e6c0a 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
> @@ -1692,6 +1692,7 @@ struct amdgpu_uvd {
> uint32_tsrbm_soft_reset;
> int cg_state, pg_state;
> struct mutexpg_lock;
> +   boolis_init;
>  };
>
>  /*
> @@ -2518,5 +2519,10 @@ int amdgpu_dm_display_resume(struct
> amdgpu_device *adev );  static inline int
> amdgpu_dm_display_resume(struct amdgpu_device *adev) { return 0; }
> #endif
>
> +struct amdgpu_cgs_device {
> +   struct cgs_device base;
> +   struct amdgpu_device *adev;
> +};
> +
>  #include "amdgpu_object.h"
>  #endif
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
> b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
> index ee95e950a19b..d553e399a835 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
> @@ -33,11 +33,6 @@
>  #include "atom.h"
>  #include "amdgpu_ucode.h"
>
> -struct amdgpu_cgs_device {
> -   struct cgs_device base;
> -   struct amdgpu_device *adev;
> -};
> -
>  #define CGS_FUNC_ADEV  \
> struct amdgpu_device *adev =\
> ((struct amdgpu_cgs_device *)cgs_device)->adev diff
> --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
> b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
> index 422d5300b92e..3b93327c5e25 100644
> --- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
> +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
> @@ -389,9 +389,9 @@ static int uvd_v6_0_start(struct amdgpu_device *adev)
> uint32_t mp_swap_cntl;
> int i, j, r;
>
> -   /* is power gated? then we can't start (TODO: re-enable power) */
> -   if (adev->uv

Re: [PATCH 4/4] drm/amd/powerplay: Prevent UVD powerdown before init

2016-07-28 Thread StDenis, Tom
Quick question, how am I meant to get access to pm.mutex from powerplay?


I need a lock I can see around the SMU calls and in the amdgpu side (for 
userspace locking).


Tom



From: Zhu, Rex
Sent: Thursday, July 28, 2016 03:43
To: Alex Deucher; Tom St Denis
Cc: StDenis, Tom; amd-gfx list
Subject: RE: [PATCH 4/4] drm/amd/powerplay: Prevent UVD powerdown before init


From: amd-gfx [mailto:amd-gfx-boun...@lists.freedesktop.org] On Behalf Of Alex 
Deucher
Sent: Thursday, July 28, 2016 1:46 PM
To: Tom St Denis
Cc: StDenis, Tom; amd-gfx list
Subject: Re: [PATCH 4/4] drm/amd/powerplay: Prevent UVD powerdown before init

On Tue, Jul 26, 2016 at 11:38 AM, Tom St Denis  wrote:
> Because of the ip_blocks init order powerplay would power down the UVD
> block before UVD start is called.  This results in a VCPU hang.
>
> This patch prevents power down before UVD is initialized.
>
> Also correct the power up order so clocking is set after power is
> ungated.
>
> With this applied comparable clock/power behaviour to powerplay=0 with
> DPM is observed.
>
> Signed-off-by: Tom St Denis 

This patch needs to be split into several patches and reworked a bit.
Also, don't include amdgpu.h in powerplay.  We have cgs for access to registers 
and data from adev, etc.  The idea is to minimize the dependencies between 
components.  We shouldn't be accessing adev directly in powerplay.  A couple 
more comments inline.


Rex:  I also think so.
1. We can move
+   WREG32(mmUVD_POWER_STATUS,
+   UVD_POWER_STATUS__UVD_PG_EN_MASK |
+   UVD_POWER_STATUS__UVD_PG_MODE_MASK);
+   else
+   WREG32(mmUVD_POWER_STATUS,
+   UVD_POWER_STATUS__UVD_PG_EN_MASK);
to uvd_v6_0_start.  no need to visit adev in powerplay and dpm.  And uvd test 
also can pass.

2.  for the lock, we can just use pm.mutex.

3.  please also delete enable_clock_power_gatings_tasks in resume_action_chain 
in a separate patch for powerplay.

4.  do we need to add cg_state, pg_state?



Best Regards
Rex


> ---
>  drivers/gpu/drm/amd/amdgpu/amdgpu.h|  6 ++
>  drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c|  5 -
>  drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c  |  8 ---
>  drivers/gpu/drm/amd/amdgpu/vi.c| 12 ---
>  .../drm/amd/powerplay/hwmgr/cz_clockpowergating.c  | 25 
> ++
>  drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c |  7 ++
>  6 files changed, 43 insertions(+), 20 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
> b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
> index d0460ea2f85b..5616b16e6c0a 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
> @@ -1692,6 +1692,7 @@ struct amdgpu_uvd {
> uint32_tsrbm_soft_reset;
> int cg_state, pg_state;
> struct mutexpg_lock;
> +   boolis_init;
>  };
>
>  /*
> @@ -2518,5 +2519,10 @@ int amdgpu_dm_display_resume(struct
> amdgpu_device *adev );  static inline int
> amdgpu_dm_display_resume(struct amdgpu_device *adev) { return 0; }
> #endif
>
> +struct amdgpu_cgs_device {
> +   struct cgs_device base;
> +   struct amdgpu_device *adev;
> +};
> +
>  #include "amdgpu_object.h"
>  #endif
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
> b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
> index ee95e950a19b..d553e399a835 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
> @@ -33,11 +33,6 @@
>  #include "atom.h"
>  #include "amdgpu_ucode.h"
>
> -struct amdgpu_cgs_device {
> -   struct cgs_device base;
> -   struct amdgpu_device *adev;
> -};
> -
>  #define CGS_FUNC_ADEV  \
> struct amdgpu_device *adev =\
> ((struct amdgpu_cgs_device *)cgs_device)->adev diff
> --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
> b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
> index 422d5300b92e..3b93327c5e25 100644
> --- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
> +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
> @@ -389,9 +389,9 @@ static int uvd_v6_0_start(struct amdgpu_device *adev)
> uint32_t mp_swap_cntl;
> int i, j, r;
>
> -   /* is power gated? then we can't start (TODO: re-enable power) */
> -   if (adev->uvd.pg_state)
> -   return -EINVAL;
> +   /* is power gated? then we can't start but don't return an error */
> +   if (adev->uvd.is_init && adev->uvd.pg_state)
> +   return 0;
>
> /* set CG state to -1 for unset */
> adev->uvd.cg_state = -1;
> @@ -662,6 +662,8 @@ static int uvd_v6_0_ring_test_ring(struct amdgpu_ring 
> *ring)
>   ring->idx, tmp);
> r = -EINVAL;
> }
> +   if 

Re: [PATCH 4/4] drm/amd/powerplay: Prevent UVD powerdown before init

2016-07-28 Thread StDenis, Tom
Nevermind I moved the locking into amdgpu_pm.c and that did the trick.


Attached is a patch that contains all the changes.  If you guys want to give it 
a quick once-through I can then start splitting it up per Alex's comments.


Tom



From: amd-gfx  on behalf of StDenis, Tom 

Sent: Thursday, July 28, 2016 07:10
To: Zhu, Rex; Alex Deucher
Cc: amd-gfx list
Subject: Re: [PATCH 4/4] drm/amd/powerplay: Prevent UVD powerdown before init


Quick question, how am I meant to get access to pm.mutex from powerplay?


I need a lock I can see around the SMU calls and in the amdgpu side (for 
userspace locking).


Tom



From: Zhu, Rex
Sent: Thursday, July 28, 2016 03:43
To: Alex Deucher; Tom St Denis
Cc: StDenis, Tom; amd-gfx list
Subject: RE: [PATCH 4/4] drm/amd/powerplay: Prevent UVD powerdown before init


From: amd-gfx [mailto:amd-gfx-boun...@lists.freedesktop.org] On Behalf Of Alex 
Deucher
Sent: Thursday, July 28, 2016 1:46 PM
To: Tom St Denis
Cc: StDenis, Tom; amd-gfx list
Subject: Re: [PATCH 4/4] drm/amd/powerplay: Prevent UVD powerdown before init

On Tue, Jul 26, 2016 at 11:38 AM, Tom St Denis  wrote:
> Because of the ip_blocks init order powerplay would power down the UVD
> block before UVD start is called.  This results in a VCPU hang.
>
> This patch prevents power down before UVD is initialized.
>
> Also correct the power up order so clocking is set after power is
> ungated.
>
> With this applied comparable clock/power behaviour to powerplay=0 with
> DPM is observed.
>
> Signed-off-by: Tom St Denis 

This patch needs to be split into several patches and reworked a bit.
Also, don't include amdgpu.h in powerplay.  We have cgs for access to registers 
and data from adev, etc.  The idea is to minimize the dependencies between 
components.  We shouldn't be accessing adev directly in powerplay.  A couple 
more comments inline.


Rex:  I also think so.
1. We can move
+   WREG32(mmUVD_POWER_STATUS,
+   UVD_POWER_STATUS__UVD_PG_EN_MASK |
+   UVD_POWER_STATUS__UVD_PG_MODE_MASK);
+   else
+   WREG32(mmUVD_POWER_STATUS,
+   UVD_POWER_STATUS__UVD_PG_EN_MASK);
to uvd_v6_0_start.  no need to visit adev in powerplay and dpm.  And uvd test 
also can pass.

2.  for the lock, we can just use pm.mutex.

3.  please also delete enable_clock_power_gatings_tasks in resume_action_chain 
in a separate patch for powerplay.

4.  do we need to add cg_state, pg_state?



Best Regards
Rex


> ---
>  drivers/gpu/drm/amd/amdgpu/amdgpu.h|  6 ++
>  drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c|  5 -
>  drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c  |  8 ---
>  drivers/gpu/drm/amd/amdgpu/vi.c| 12 ---
>  .../drm/amd/powerplay/hwmgr/cz_clockpowergating.c  | 25 
> ++
>  drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c |  7 ++
>  6 files changed, 43 insertions(+), 20 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
> b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
> index d0460ea2f85b..5616b16e6c0a 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
> @@ -1692,6 +1692,7 @@ struct amdgpu_uvd {
> uint32_tsrbm_soft_reset;
> int cg_state, pg_state;
> struct mutexpg_lock;
> +   boolis_init;
>  };
>
>  /*
> @@ -2518,5 +2519,10 @@ int amdgpu_dm_display_resume(struct
> amdgpu_device *adev );  static inline int
> amdgpu_dm_display_resume(struct amdgpu_device *adev) { return 0; }
> #endif
>
> +struct amdgpu_cgs_device {
> +   struct cgs_device base;
> +   struct amdgpu_device *adev;
> +};
> +
>  #include "amdgpu_object.h"
>  #endif
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
> b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
> index ee95e950a19b..d553e399a835 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
> @@ -33,11 +33,6 @@
>  #include "atom.h"
>  #include "amdgpu_ucode.h"
>
> -struct amdgpu_cgs_device {
> -   struct cgs_device base;
> -   struct amdgpu_device *adev;
> -};
> -
>  #define CGS_FUNC_ADEV  \
> struct amdgpu_device *adev =\
> ((struct amdgpu_cgs_device *)cgs_device)->adev diff
> --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
> b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
> index 422d5300b92e..3b93327c5e25 100644
> --- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
> +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
> @@ -389,9 +389,9 @@ static int uvd_v6_0_start(struct amdgpu_device *adev)
> uint32_t mp_swap_cntl;
> int i, j, r;
>
> -   /* is power gated? then we can't start (TODO: re-enable power) */
> -   if (adev->uvd.p

Re: [PATCH 4/4] drm/amd/powerplay: Prevent UVD powerdown before init

2016-07-28 Thread Zhu, Rex

Looks good to me.


Best Regards

Rex


From: StDenis, Tom
Sent: Thursday, July 28, 2016 8:19:52 PM
To: Zhu, Rex; Alex Deucher
Cc: amd-gfx list
Subject: Re: [PATCH 4/4] drm/amd/powerplay: Prevent UVD powerdown before init


Nevermind I moved the locking into amdgpu_pm.c and that did the trick.


Attached is a patch that contains all the changes.  If you guys want to give it 
a quick once-through I can then start splitting it up per Alex's comments.


Tom



From: amd-gfx  on behalf of StDenis, Tom 

Sent: Thursday, July 28, 2016 07:10
To: Zhu, Rex; Alex Deucher
Cc: amd-gfx list
Subject: Re: [PATCH 4/4] drm/amd/powerplay: Prevent UVD powerdown before init


Quick question, how am I meant to get access to pm.mutex from powerplay?


I need a lock I can see around the SMU calls and in the amdgpu side (for 
userspace locking).


Tom



From: Zhu, Rex
Sent: Thursday, July 28, 2016 03:43
To: Alex Deucher; Tom St Denis
Cc: StDenis, Tom; amd-gfx list
Subject: RE: [PATCH 4/4] drm/amd/powerplay: Prevent UVD powerdown before init


From: amd-gfx [mailto:amd-gfx-boun...@lists.freedesktop.org] On Behalf Of Alex 
Deucher
Sent: Thursday, July 28, 2016 1:46 PM
To: Tom St Denis
Cc: StDenis, Tom; amd-gfx list
Subject: Re: [PATCH 4/4] drm/amd/powerplay: Prevent UVD powerdown before init

On Tue, Jul 26, 2016 at 11:38 AM, Tom St Denis  wrote:
> Because of the ip_blocks init order powerplay would power down the UVD
> block before UVD start is called.  This results in a VCPU hang.
>
> This patch prevents power down before UVD is initialized.
>
> Also correct the power up order so clocking is set after power is
> ungated.
>
> With this applied comparable clock/power behaviour to powerplay=0 with
> DPM is observed.
>
> Signed-off-by: Tom St Denis 

This patch needs to be split into several patches and reworked a bit.
Also, don't include amdgpu.h in powerplay.  We have cgs for access to registers 
and data from adev, etc.  The idea is to minimize the dependencies between 
components.  We shouldn't be accessing adev directly in powerplay.  A couple 
more comments inline.


Rex:  I also think so.
1. We can move
+   WREG32(mmUVD_POWER_STATUS,
+   UVD_POWER_STATUS__UVD_PG_EN_MASK |
+   UVD_POWER_STATUS__UVD_PG_MODE_MASK);
+   else
+   WREG32(mmUVD_POWER_STATUS,
+   UVD_POWER_STATUS__UVD_PG_EN_MASK);
to uvd_v6_0_start.  no need to visit adev in powerplay and dpm.  And uvd test 
also can pass.

2.  for the lock, we can just use pm.mutex.

3.  please also delete enable_clock_power_gatings_tasks in resume_action_chain 
in a separate patch for powerplay.

4.  do we need to add cg_state, pg_state?



Best Regards
Rex


> ---
>  drivers/gpu/drm/amd/amdgpu/amdgpu.h|  6 ++
>  drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c|  5 -
>  drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c  |  8 ---
>  drivers/gpu/drm/amd/amdgpu/vi.c| 12 ---
>  .../drm/amd/powerplay/hwmgr/cz_clockpowergating.c  | 25 
> ++
>  drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c |  7 ++
>  6 files changed, 43 insertions(+), 20 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
> b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
> index d0460ea2f85b..5616b16e6c0a 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
> @@ -1692,6 +1692,7 @@ struct amdgpu_uvd {
> uint32_tsrbm_soft_reset;
> int cg_state, pg_state;
> struct mutexpg_lock;
> +   boolis_init;
>  };
>
>  /*
> @@ -2518,5 +2519,10 @@ int amdgpu_dm_display_resume(struct
> amdgpu_device *adev );  static inline int
> amdgpu_dm_display_resume(struct amdgpu_device *adev) { return 0; }
> #endif
>
> +struct amdgpu_cgs_device {
> +   struct cgs_device base;
> +   struct amdgpu_device *adev;
> +};
> +
>  #include "amdgpu_object.h"
>  #endif
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
> b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
> index ee95e950a19b..d553e399a835 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
> @@ -33,11 +33,6 @@
>  #include "atom.h"
>  #include "amdgpu_ucode.h"
>
> -struct amdgpu_cgs_device {
> -   struct cgs_device base;
> -   struct amdgpu_device *adev;
> -};
> -
>  #define CGS_FUNC_ADEV  \
> struct amdgpu_device *adev =\
> ((struct amdgpu_cgs_device *)cgs_device)->adev diff
> --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
> b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
> index 422d5300b92e..3b93327c5e25 100644
> --- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
> +++ b/drivers/gpu/drm/

Re: [PATCH 4/4] drm/amd/powerplay: Prevent UVD powerdown before init

2016-07-28 Thread StDenis, Tom
Hi Rex,


Thanks.  BTW I fixed the one liner {} in the PP code (removed the {} braces) in 
my worktree after I sent that in case anyone notices that :-)


Tom



From: Zhu, Rex
Sent: Thursday, July 28, 2016 08:43
To: StDenis, Tom; Alex Deucher
Cc: amd-gfx list
Subject: Re: [PATCH 4/4] drm/amd/powerplay: Prevent UVD powerdown before init



Looks good to me.


Best Regards

Rex


From: StDenis, Tom
Sent: Thursday, July 28, 2016 8:19:52 PM
To: Zhu, Rex; Alex Deucher
Cc: amd-gfx list
Subject: Re: [PATCH 4/4] drm/amd/powerplay: Prevent UVD powerdown before init


Nevermind I moved the locking into amdgpu_pm.c and that did the trick.


Attached is a patch that contains all the changes.  If you guys want to give it 
a quick once-through I can then start splitting it up per Alex's comments.


Tom



From: amd-gfx  on behalf of StDenis, Tom 

Sent: Thursday, July 28, 2016 07:10
To: Zhu, Rex; Alex Deucher
Cc: amd-gfx list
Subject: Re: [PATCH 4/4] drm/amd/powerplay: Prevent UVD powerdown before init


Quick question, how am I meant to get access to pm.mutex from powerplay?


I need a lock I can see around the SMU calls and in the amdgpu side (for 
userspace locking).


Tom



From: Zhu, Rex
Sent: Thursday, July 28, 2016 03:43
To: Alex Deucher; Tom St Denis
Cc: StDenis, Tom; amd-gfx list
Subject: RE: [PATCH 4/4] drm/amd/powerplay: Prevent UVD powerdown before init


From: amd-gfx [mailto:amd-gfx-boun...@lists.freedesktop.org] On Behalf Of Alex 
Deucher
Sent: Thursday, July 28, 2016 1:46 PM
To: Tom St Denis
Cc: StDenis, Tom; amd-gfx list
Subject: Re: [PATCH 4/4] drm/amd/powerplay: Prevent UVD powerdown before init

On Tue, Jul 26, 2016 at 11:38 AM, Tom St Denis  wrote:
> Because of the ip_blocks init order powerplay would power down the UVD
> block before UVD start is called.  This results in a VCPU hang.
>
> This patch prevents power down before UVD is initialized.
>
> Also correct the power up order so clocking is set after power is
> ungated.
>
> With this applied comparable clock/power behaviour to powerplay=0 with
> DPM is observed.
>
> Signed-off-by: Tom St Denis 

This patch needs to be split into several patches and reworked a bit.
Also, don't include amdgpu.h in powerplay.  We have cgs for access to registers 
and data from adev, etc.  The idea is to minimize the dependencies between 
components.  We shouldn't be accessing adev directly in powerplay.  A couple 
more comments inline.


Rex:  I also think so.
1. We can move
+   WREG32(mmUVD_POWER_STATUS,
+   UVD_POWER_STATUS__UVD_PG_EN_MASK |
+   UVD_POWER_STATUS__UVD_PG_MODE_MASK);
+   else
+   WREG32(mmUVD_POWER_STATUS,
+   UVD_POWER_STATUS__UVD_PG_EN_MASK);
to uvd_v6_0_start.  no need to visit adev in powerplay and dpm.  And uvd test 
also can pass.

2.  for the lock, we can just use pm.mutex.

3.  please also delete enable_clock_power_gatings_tasks in resume_action_chain 
in a separate patch for powerplay.

4.  do we need to add cg_state, pg_state?



Best Regards
Rex


> ---
>  drivers/gpu/drm/amd/amdgpu/amdgpu.h|  6 ++
>  drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c|  5 -
>  drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c  |  8 ---
>  drivers/gpu/drm/amd/amdgpu/vi.c| 12 ---
>  .../drm/amd/powerplay/hwmgr/cz_clockpowergating.c  | 25 
> ++
>  drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c |  7 ++
>  6 files changed, 43 insertions(+), 20 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
> b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
> index d0460ea2f85b..5616b16e6c0a 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
> @@ -1692,6 +1692,7 @@ struct amdgpu_uvd {
> uint32_tsrbm_soft_reset;
> int cg_state, pg_state;
> struct mutexpg_lock;
> +   boolis_init;
>  };
>
>  /*
> @@ -2518,5 +2519,10 @@ int amdgpu_dm_display_resume(struct
> amdgpu_device *adev );  static inline int
> amdgpu_dm_display_resume(struct amdgpu_device *adev) { return 0; }
> #endif
>
> +struct amdgpu_cgs_device {
> +   struct cgs_device base;
> +   struct amdgpu_device *adev;
> +};
> +
>  #include "amdgpu_object.h"
>  #endif
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
> b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
> index ee95e950a19b..d553e399a835 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
> @@ -33,11 +33,6 @@
>  #include "atom.h"
>  #include "amdgpu_ucode.h"
>
> -struct amdgpu_cgs_device {
> -   struct cgs_device base;
> -   struct amdgpu_device *adev;
> -};
> -
>  #define CGS_FUNC_ADEV   

Re: [PATCH 4/4] drm/amd/powerplay: Prevent UVD powerdown before init

2016-07-28 Thread Zhu, Rex
you mean

+if (cz_hwmgr->uvd_power_gated == bgate) {
 return 0;
+}


I didn't pay any attention at first.


Best Regards

Rex



From: StDenis, Tom
Sent: Thursday, July 28, 2016 8:44:11 PM
To: Zhu, Rex; Alex Deucher
Cc: amd-gfx list
Subject: Re: [PATCH 4/4] drm/amd/powerplay: Prevent UVD powerdown before init


Hi Rex,


Thanks.  BTW I fixed the one liner {} in the PP code (removed the {} braces) in 
my worktree after I sent that in case anyone notices that :-)


Tom



From: Zhu, Rex
Sent: Thursday, July 28, 2016 08:43
To: StDenis, Tom; Alex Deucher
Cc: amd-gfx list
Subject: Re: [PATCH 4/4] drm/amd/powerplay: Prevent UVD powerdown before init



Looks good to me.


Best Regards

Rex


From: StDenis, Tom
Sent: Thursday, July 28, 2016 8:19:52 PM
To: Zhu, Rex; Alex Deucher
Cc: amd-gfx list
Subject: Re: [PATCH 4/4] drm/amd/powerplay: Prevent UVD powerdown before init


Never mind — I moved the locking into amdgpu_pm.c and that did the trick.


Attached is a patch that contains all the changes.  If you guys want to give it 
a quick once-through I can then start splitting it up per Alex's comments.


Tom



From: amd-gfx  on behalf of StDenis, Tom 

Sent: Thursday, July 28, 2016 07:10
To: Zhu, Rex; Alex Deucher
Cc: amd-gfx list
Subject: Re: [PATCH 4/4] drm/amd/powerplay: Prevent UVD powerdown before init


Quick question, how am I meant to get access to pm.mutex from powerplay?


I need a lock I can see around the SMU calls and in the amdgpu side (for 
userspace locking).


Tom



From: Zhu, Rex
Sent: Thursday, July 28, 2016 03:43
To: Alex Deucher; Tom St Denis
Cc: StDenis, Tom; amd-gfx list
Subject: RE: [PATCH 4/4] drm/amd/powerplay: Prevent UVD powerdown before init


From: amd-gfx [mailto:amd-gfx-boun...@lists.freedesktop.org] On Behalf Of Alex 
Deucher
Sent: Thursday, July 28, 2016 1:46 PM
To: Tom St Denis
Cc: StDenis, Tom; amd-gfx list
Subject: Re: [PATCH 4/4] drm/amd/powerplay: Prevent UVD powerdown before init

On Tue, Jul 26, 2016 at 11:38 AM, Tom St Denis  wrote:
> Because of the ip_blocks init order powerplay would power down the UVD
> block before UVD start is called.  This results in a VCPU hang.
>
> This patch prevents power down before UVD is initialized.
>
> Also correct the power up order so clocking is set after power is
> ungated.
>
> With this applied comparable clock/power behaviour to powerplay=0 with
> DPM is observed.
>
> Signed-off-by: Tom St Denis 

This patch needs to be split into several patches and reworked a bit.
Also, don't include amdgpu.h in powerplay.  We have cgs for access to registers 
and data from adev, etc.  The idea is to minimize the dependencies between 
components.  We shouldn't be accessing adev directly in powerplay.  A couple 
more comments inline.


Rex:  I also think so.
1. We can move
+   WREG32(mmUVD_POWER_STATUS,
+   UVD_POWER_STATUS__UVD_PG_EN_MASK |
+   UVD_POWER_STATUS__UVD_PG_MODE_MASK);
+   else
+   WREG32(mmUVD_POWER_STATUS,
+   UVD_POWER_STATUS__UVD_PG_EN_MASK);
to uvd_v6_0_start.  no need to visit adev in powerplay and dpm.  And uvd test 
also can pass.

2.  for the lock, we can just use pm.mutex.

3.  please also delete enable_clock_power_gatings_tasks in resume_action_chain 
in a separate patch for powerplay.

4.  do we need to add cg_state, pg_state?



Best Regards
Rex


> ---
>  drivers/gpu/drm/amd/amdgpu/amdgpu.h|  6 ++
>  drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c|  5 -
>  drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c  |  8 ---
>  drivers/gpu/drm/amd/amdgpu/vi.c| 12 ---
>  .../drm/amd/powerplay/hwmgr/cz_clockpowergating.c  | 25 
> ++
>  drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c |  7 ++
>  6 files changed, 43 insertions(+), 20 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
> b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
> index d0460ea2f85b..5616b16e6c0a 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
> @@ -1692,6 +1692,7 @@ struct amdgpu_uvd {
> uint32_tsrbm_soft_reset;
> int cg_state, pg_state;
> struct mutexpg_lock;
> +   boolis_init;
>  };
>
>  /*
> @@ -2518,5 +2519,10 @@ int amdgpu_dm_display_resume(struct
> amdgpu_device *adev );  static inline int
> amdgpu_dm_display_resume(struct amdgpu_device *adev) { return 0; }
> #endif
>
> +struct amdgpu_cgs_device {
> +   struct cgs_device base;
> +   struct amdgpu_device *adev;
> +};
> +
>  #include "amdgpu_object.h"
>  #endif
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
> b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
> index ee95e

Re: [PATCH 4/4] drm/amd/powerplay: Prevent UVD powerdown before init

2016-07-28 Thread Alex Deucher
On Thu, Jul 28, 2016 at 8:59 AM, StDenis, Tom  wrote:
> Yup, I fixed that in my worktree already.


Looks good to me.

Alex

>
>
> Tom
>
>
>
> 
> From: Zhu, Rex
> Sent: Thursday, July 28, 2016 08:59
>
> To: StDenis, Tom; Alex Deucher
> Cc: amd-gfx list
> Subject: Re: [PATCH 4/4] drm/amd/powerplay: Prevent UVD powerdown before
> init
>
>
> you mean
>
> +if (cz_hwmgr->uvd_power_gated == bgate) {
>  return 0;
> +}
>
>
> I didn't pay any attention at first.
>
>
> Best Regards
>
> Rex
>
>
> 
> From: StDenis, Tom
> Sent: Thursday, July 28, 2016 8:44:11 PM
> To: Zhu, Rex; Alex Deucher
> Cc: amd-gfx list
> Subject: Re: [PATCH 4/4] drm/amd/powerplay: Prevent UVD powerdown before
> init
>
>
> Hi Rex,
>
>
> Thanks.  BTW I fixed the one liner {} in the PP code (removed the {} braces)
> in my worktree after I sent that in case anyone notices that :-)
>
>
> Tom
>
>
>
> 
> From: Zhu, Rex
> Sent: Thursday, July 28, 2016 08:43
> To: StDenis, Tom; Alex Deucher
> Cc: amd-gfx list
> Subject: Re: [PATCH 4/4] drm/amd/powerplay: Prevent UVD powerdown before
> init
>
>
> Looks good to me.
>
>
> Best Regards
>
> Rex
>
> 
> From: StDenis, Tom
> Sent: Thursday, July 28, 2016 8:19:52 PM
> To: Zhu, Rex; Alex Deucher
> Cc: amd-gfx list
> Subject: Re: [PATCH 4/4] drm/amd/powerplay: Prevent UVD powerdown before
> init
>
>
> Nevermind I moved the locking into amdgpu_pm.c and that did the trick.
>
>
> Attached is a patch that contains all the changes.  If you guys want to give
> it a quick once-through I can then start splitting it up per Alex's
> comments.
>
>
> Tom
>
>
>
> 
> From: amd-gfx  on behalf of StDenis,
> Tom 
> Sent: Thursday, July 28, 2016 07:10
> To: Zhu, Rex; Alex Deucher
> Cc: amd-gfx list
> Subject: Re: [PATCH 4/4] drm/amd/powerplay: Prevent UVD powerdown before
> init
>
>
> Quick question, how am I meant to get access to pm.mutex from powerplay?
>
>
> I need a lock I can see around the SMU calls and in the amdgpu side (for
> userspace locking).
>
>
> Tom
>
>
>
> 
> From: Zhu, Rex
> Sent: Thursday, July 28, 2016 03:43
> To: Alex Deucher; Tom St Denis
> Cc: StDenis, Tom; amd-gfx list
> Subject: RE: [PATCH 4/4] drm/amd/powerplay: Prevent UVD powerdown before
> init
>
>
> From: amd-gfx [mailto:amd-gfx-boun...@lists.freedesktop.org] On Behalf Of
> Alex Deucher
> Sent: Thursday, July 28, 2016 1:46 PM
> To: Tom St Denis
> Cc: StDenis, Tom; amd-gfx list
> Subject: Re: [PATCH 4/4] drm/amd/powerplay: Prevent UVD powerdown before
> init
>
> On Tue, Jul 26, 2016 at 11:38 AM, Tom St Denis  wrote:
>> Because of the ip_blocks init order powerplay would power down the UVD
>> block before UVD start is called.  This results in a VCPU hang.
>>
>> This patch prevents power down before UVD is initialized.
>>
>> Also correct the power up order so clocking is set after power is
>> ungated.
>>
>> With this applied comparable clock/power behaviour to powerplay=0 with
>> DPM is observed.
>>
>> Signed-off-by: Tom St Denis 
>
> This patch needs to be split into several patches and reworked a bit.
> Also, don't include amdgpu.h in powerplay.  We have cgs for access to
> registers and data from adev, etc.  The idea is to minimize the dependencies
> between components.  We shouldn't be accessing adev directly in powerplay.
> A couple more comments inline.
>
>
> Rex:  I also think so.
> 1. We can move
> +   WREG32(mmUVD_POWER_STATUS,
> +   UVD_POWER_STATUS__UVD_PG_EN_MASK |
> +   UVD_POWER_STATUS__UVD_PG_MODE_MASK);
> +   else
> +   WREG32(mmUVD_POWER_STATUS,
> +   UVD_POWER_STATUS__UVD_PG_EN_MASK);
> to uvd_v6_0_start.  no need to visit adev in powerplay and dpm.  And uvd
> test also can pass.
>
> 2.  for the lock, we can just use pm.mutex.
>
> 3.  please also delete enable_clock_power_gatings_tasks in
> resume_action_chain in a separate patch for powerplay.
>
> 4.  do we need to add cg_state, pg_state?
>
>
>
> Best Regards
> Rex
>
>
>> ---
>>  drivers/gpu/drm/amd/amdgpu/amdgpu.h|  6 ++
>>  drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c|  5 -
>>  drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c  |  8 ---
>>  drivers/gpu/drm/amd/amdgpu/vi.c| 12 ---
>>  .../drm/amd/powerplay/hwmgr/cz_clockpowergating.c  | 25
>> ++
>>  drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c |  7 ++
>>  6 files changed, 43 insertions(+), 20 deletions(-)
>>
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
>> b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
>> index d0460ea2f85b..5616b16e6c0a 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
>> @@ -1692,6 +1692,7 @@ struct amdgpu_uvd {
>> uint32_t   

Re: [PATCH 4/4] drm/amd/powerplay: Prevent UVD powerdown before init

2016-07-28 Thread StDenis, Tom
Yup, I fixed that in my worktree already.


Tom



From: Zhu, Rex
Sent: Thursday, July 28, 2016 08:59
To: StDenis, Tom; Alex Deucher
Cc: amd-gfx list
Subject: Re: [PATCH 4/4] drm/amd/powerplay: Prevent UVD powerdown before init


you mean

+if (cz_hwmgr->uvd_power_gated == bgate) {
 return 0;
+}


I didn't pay any attention at first.


Best Regards

Rex



From: StDenis, Tom
Sent: Thursday, July 28, 2016 8:44:11 PM
To: Zhu, Rex; Alex Deucher
Cc: amd-gfx list
Subject: Re: [PATCH 4/4] drm/amd/powerplay: Prevent UVD powerdown before init


Hi Rex,


Thanks.  BTW I fixed the one liner {} in the PP code (removed the {} braces) in 
my worktree after I sent that in case anyone notices that :-)


Tom



From: Zhu, Rex
Sent: Thursday, July 28, 2016 08:43
To: StDenis, Tom; Alex Deucher
Cc: amd-gfx list
Subject: Re: [PATCH 4/4] drm/amd/powerplay: Prevent UVD powerdown before init



Looks good to me.


Best Regards

Rex


From: StDenis, Tom
Sent: Thursday, July 28, 2016 8:19:52 PM
To: Zhu, Rex; Alex Deucher
Cc: amd-gfx list
Subject: Re: [PATCH 4/4] drm/amd/powerplay: Prevent UVD powerdown before init


Never mind — I moved the locking into amdgpu_pm.c and that did the trick.


Attached is a patch that contains all the changes.  If you guys want to give it 
a quick once-through I can then start splitting it up per Alex's comments.


Tom



From: amd-gfx  on behalf of StDenis, Tom 

Sent: Thursday, July 28, 2016 07:10
To: Zhu, Rex; Alex Deucher
Cc: amd-gfx list
Subject: Re: [PATCH 4/4] drm/amd/powerplay: Prevent UVD powerdown before init


Quick question, how am I meant to get access to pm.mutex from powerplay?


I need a lock I can see around the SMU calls and in the amdgpu side (for 
userspace locking).


Tom



From: Zhu, Rex
Sent: Thursday, July 28, 2016 03:43
To: Alex Deucher; Tom St Denis
Cc: StDenis, Tom; amd-gfx list
Subject: RE: [PATCH 4/4] drm/amd/powerplay: Prevent UVD powerdown before init


From: amd-gfx [mailto:amd-gfx-boun...@lists.freedesktop.org] On Behalf Of Alex 
Deucher
Sent: Thursday, July 28, 2016 1:46 PM
To: Tom St Denis
Cc: StDenis, Tom; amd-gfx list
Subject: Re: [PATCH 4/4] drm/amd/powerplay: Prevent UVD powerdown before init

On Tue, Jul 26, 2016 at 11:38 AM, Tom St Denis  wrote:
> Because of the ip_blocks init order powerplay would power down the UVD
> block before UVD start is called.  This results in a VCPU hang.
>
> This patch prevents power down before UVD is initialized.
>
> Also correct the power up order so clocking is set after power is
> ungated.
>
> With this applied comparable clock/power behaviour to powerplay=0 with
> DPM is observed.
>
> Signed-off-by: Tom St Denis 

This patch needs to be split into several patches and reworked a bit.
Also, don't include amdgpu.h in powerplay.  We have cgs for access to registers 
and data from adev, etc.  The idea is to minimize the dependencies between 
components.  We shouldn't be accessing adev directly in powerplay.  A couple 
more comments inline.


Rex:  I also think so.
1. We can move
+   WREG32(mmUVD_POWER_STATUS,
+   UVD_POWER_STATUS__UVD_PG_EN_MASK |
+   UVD_POWER_STATUS__UVD_PG_MODE_MASK);
+   else
+   WREG32(mmUVD_POWER_STATUS,
+   UVD_POWER_STATUS__UVD_PG_EN_MASK);
to uvd_v6_0_start.  no need to visit adev in powerplay and dpm.  And uvd test 
also can pass.

2.  for the lock, we can just use pm.mutex.

3.  please also delete enable_clock_power_gatings_tasks in resume_action_chain 
in a separate patch for powerplay.

4.  do we need to add cg_state, pg_state?



Best Regards
Rex


> ---
>  drivers/gpu/drm/amd/amdgpu/amdgpu.h|  6 ++
>  drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c|  5 -
>  drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c  |  8 ---
>  drivers/gpu/drm/amd/amdgpu/vi.c| 12 ---
>  .../drm/amd/powerplay/hwmgr/cz_clockpowergating.c  | 25 
> ++
>  drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c |  7 ++
>  6 files changed, 43 insertions(+), 20 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
> b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
> index d0460ea2f85b..5616b16e6c0a 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
> @@ -1692,6 +1692,7 @@ struct amdgpu_uvd {
> uint32_tsrbm_soft_reset;
> int cg_state, pg_state;
> struct mutexpg_lock;
> +   boolis_init;
>  };
>
>  /*
> @@ -2518,5 +2519,10 @@ int amdgpu_dm_display_resume(struct
> amdgpu_device *adev );  static inline int
> amdgpu_dm_display_resume(struct amdgpu_device *adev) { return 0; }
> #endif
>
> 

RE: [PATCH 4/4] drm/amd/powerplay: not change uvd/vce block's state when initialize/resume

2016-07-28 Thread Deucher, Alexander
> -Original Message-
> From: amd-gfx [mailto:amd-gfx-boun...@lists.freedesktop.org] On Behalf
> Of Rex Zhu
> Sent: Thursday, July 28, 2016 5:53 AM
> To: amd-gfx@lists.freedesktop.org
> Cc: Zhu, Rex
> Subject: [PATCH 4/4] drm/amd/powerplay: not change uvd/vce block's state
> when initialize/resume
> 
> Change-Id: I6e338a5faeb023b13bb450ecb1c4bb3eaa3b0ac5
> Signed-off-by: Rex Zhu 

For the series:
Reviewed-by: Alex Deucher 

> ---
>  drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c | 2 --
>  1 file changed, 2 deletions(-)
> 
> diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c
> b/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c
> index d6635cc..635fc4b 100644
> --- a/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c
> +++ b/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c
> @@ -30,7 +30,6 @@ static const pem_event_action * const initialize_event[]
> = {
>   system_config_tasks,
>   setup_asic_tasks,
>   enable_dynamic_state_management_tasks,
> - enable_clock_power_gatings_tasks,
>   get_2d_performance_state_tasks,
>   set_performance_state_tasks,
>   initialize_thermal_controller_tasks,
> @@ -140,7 +139,6 @@ static const pem_event_action * const
> resume_event[] = {
>   setup_asic_tasks,
>   enable_stutter_mode_tasks, /*must do this in boot state and before
> SMC is started */
>   enable_dynamic_state_management_tasks,
> - enable_clock_power_gatings_tasks,
>   enable_disable_bapm_tasks,
>   initialize_thermal_controller_tasks,
>   get_2d_performance_state_tasks,
> --
> 1.9.1
> 
> ___
> amd-gfx mailing list
> amd-gfx@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/amd-gfx
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH] drm/amdgpu/powerplay: partial revert of endian fixes

2016-07-28 Thread Alex Deucher
This fixes a warning on big endian. Bitfields need to
be handled properly.

Cc: Arnd Bergmann 
Signed-off-by: Alex Deucher 
---
 drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c 
b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c
index 0f5c9d0..7392f71 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c
@@ -1289,9 +1289,9 @@ int atomctrl_set_ac_timing_ai(struct pp_hwmgr *hwmgr, 
uint32_t memory_clock,
int result;
 
memory_clock_parameters.asDPMMCReg.ulClock.ulClockFreq =
-   cpu_to_le32(memory_clock & SET_CLOCK_FREQ_MASK);
+   memory_clock & SET_CLOCK_FREQ_MASK;
memory_clock_parameters.asDPMMCReg.ulClock.ulComputeClockFlag =
-   cpu_to_le32(ADJUST_MC_SETTING_PARAM);
+   ADJUST_MC_SETTING_PARAM;
memory_clock_parameters.asDPMMCReg.ucMclkDPMState = level;
 
result = cgs_atom_exec_cmd_table
-- 
2.5.5

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH 4/4] drm/amd/powerplay: not change uvd/vce block's state when initialize/resume

2016-07-28 Thread StDenis, Tom
Um, I had this in my worktree ... so uh do I drop it from mine or what?


Tom



From: amd-gfx  on behalf of Deucher, 
Alexander 
Sent: Thursday, July 28, 2016 09:48
To: Zhu, Rex; amd-gfx@lists.freedesktop.org
Cc: Zhu, Rex
Subject: RE: [PATCH 4/4] drm/amd/powerplay: not change uvd/vce block's state 
when initialize/resume

> -Original Message-
> From: amd-gfx [mailto:amd-gfx-boun...@lists.freedesktop.org] On Behalf
> Of Rex Zhu
> Sent: Thursday, July 28, 2016 5:53 AM
> To: amd-gfx@lists.freedesktop.org
> Cc: Zhu, Rex
> Subject: [PATCH 4/4] drm/amd/powerplay: not change uvd/vce block's state
> when initialize/resume
>
> Change-Id: I6e338a5faeb023b13bb450ecb1c4bb3eaa3b0ac5
> Signed-off-by: Rex Zhu 

For the series:
Reviewed-by: Alex Deucher 

> ---
>  drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c | 2 --
>  1 file changed, 2 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c
> b/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c
> index d6635cc..635fc4b 100644
> --- a/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c
> +++ b/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c
> @@ -30,7 +30,6 @@ static const pem_event_action * const initialize_event[]
> = {
>system_config_tasks,
>setup_asic_tasks,
>enable_dynamic_state_management_tasks,
> - enable_clock_power_gatings_tasks,
>get_2d_performance_state_tasks,
>set_performance_state_tasks,
>initialize_thermal_controller_tasks,
> @@ -140,7 +139,6 @@ static const pem_event_action * const
> resume_event[] = {
>setup_asic_tasks,
>enable_stutter_mode_tasks, /*must do this in boot state and before
> SMC is started */
>enable_dynamic_state_management_tasks,
> - enable_clock_power_gatings_tasks,
>enable_disable_bapm_tasks,
>initialize_thermal_controller_tasks,
>get_2d_performance_state_tasks,
> --
> 1.9.1
>
> ___
> amd-gfx mailing list
> amd-gfx@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/amd-gfx
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


RE: [PATCH 4/4] drm/amd/powerplay: not change uvd/vce block's state when initialize/resume

2016-07-28 Thread Deucher, Alexander
Whoever commits it first wins ;)

From: StDenis, Tom
Sent: Thursday, July 28, 2016 10:13 AM
To: Deucher, Alexander; Zhu, Rex; amd-gfx@lists.freedesktop.org
Subject: Re: [PATCH 4/4] drm/amd/powerplay: not change uvd/vce block's state 
when initialize/resume


Um, I had this in my worktree ... so uh do I drop it from mine or what?



Tom


From: amd-gfx 
mailto:amd-gfx-boun...@lists.freedesktop.org>>
 on behalf of Deucher, Alexander 
mailto:alexander.deuc...@amd.com>>
Sent: Thursday, July 28, 2016 09:48
To: Zhu, Rex; 
amd-gfx@lists.freedesktop.org
Cc: Zhu, Rex
Subject: RE: [PATCH 4/4] drm/amd/powerplay: not change uvd/vce block's state 
when initialize/resume

> -Original Message-
> From: amd-gfx [mailto:amd-gfx-boun...@lists.freedesktop.org] On Behalf
> Of Rex Zhu
> Sent: Thursday, July 28, 2016 5:53 AM
> To: amd-gfx@lists.freedesktop.org
> Cc: Zhu, Rex
> Subject: [PATCH 4/4] drm/amd/powerplay: not change uvd/vce block's state
> when initialize/resume
>
> Change-Id: I6e338a5faeb023b13bb450ecb1c4bb3eaa3b0ac5
> Signed-off-by: Rex Zhu mailto:rex@amd.com>>

For the series:
Reviewed-by: Alex Deucher 
mailto:alexander.deuc...@amd.com>>

> ---
>  drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c | 2 --
>  1 file changed, 2 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c
> b/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c
> index d6635cc..635fc4b 100644
> --- a/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c
> +++ b/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c
> @@ -30,7 +30,6 @@ static const pem_event_action * const initialize_event[]
> = {
>system_config_tasks,
>setup_asic_tasks,
>enable_dynamic_state_management_tasks,
> - enable_clock_power_gatings_tasks,
>get_2d_performance_state_tasks,
>set_performance_state_tasks,
>initialize_thermal_controller_tasks,
> @@ -140,7 +139,6 @@ static const pem_event_action * const
> resume_event[] = {
>setup_asic_tasks,
>enable_stutter_mode_tasks, /*must do this in boot state and before
> SMC is started */
>enable_dynamic_state_management_tasks,
> - enable_clock_power_gatings_tasks,
>enable_disable_bapm_tasks,
>initialize_thermal_controller_tasks,
>get_2d_performance_state_tasks,
> --
> 1.9.1
>
> ___
> amd-gfx mailing list
> amd-gfx@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/amd-gfx
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


GFX/UVD/VCE PG (v3)

2016-07-28 Thread Tom St Denis
In this respin I've factored out the changes into multiple patches,
followed by adding fixes for powerplay (don't gate on init/resume)
as well as reversing the order of the operations when powering up
(setting the clock last).

Finally, this series adds VCE PG tested on both the Carrizo and Stoney
systems I have.


___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH 01/14] drm/amd/amdgpu: add mutex locking for both DPM and PP based powergating for UVD/VCE

2016-07-28 Thread Tom St Denis
This adds a mutex lock for both DPM/PP around the changes in
power gating state so that userspace can poll registers without
a race condition on power state.

Signed-off-by: Tom St Denis 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c | 58 +++---
 1 file changed, 25 insertions(+), 33 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index 12ab58eca581..c4bb4ef8f2c6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -1105,54 +1105,46 @@ force:
 
 void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
 {
-   if (adev->pp_enabled)
+   if (adev->pp_enabled || adev->pm.funcs->powergate_uvd) {
+   /* enable/disable UVD */
+   mutex_lock(&adev->pm.mutex);
amdgpu_dpm_powergate_uvd(adev, !enable);
-   else {
-   if (adev->pm.funcs->powergate_uvd) {
+   mutex_unlock(&adev->pm.mutex);
+   } else {
+   if (enable) {
mutex_lock(&adev->pm.mutex);
-   /* enable/disable UVD */
-   amdgpu_dpm_powergate_uvd(adev, !enable);
+   adev->pm.dpm.uvd_active = true;
+   adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_UVD;
mutex_unlock(&adev->pm.mutex);
} else {
-   if (enable) {
-   mutex_lock(&adev->pm.mutex);
-   adev->pm.dpm.uvd_active = true;
-   adev->pm.dpm.state = 
POWER_STATE_TYPE_INTERNAL_UVD;
-   mutex_unlock(&adev->pm.mutex);
-   } else {
-   mutex_lock(&adev->pm.mutex);
-   adev->pm.dpm.uvd_active = false;
-   mutex_unlock(&adev->pm.mutex);
-   }
-   amdgpu_pm_compute_clocks(adev);
+   mutex_lock(&adev->pm.mutex);
+   adev->pm.dpm.uvd_active = false;
+   mutex_unlock(&adev->pm.mutex);
}
-
+   amdgpu_pm_compute_clocks(adev);
}
 }
 
 void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
 {
-   if (adev->pp_enabled)
+   if (adev->pp_enabled || adev->pm.funcs->powergate_vce) {
+   /* enable/disable VCE */
+   mutex_lock(&adev->pm.mutex);
amdgpu_dpm_powergate_vce(adev, !enable);
-   else {
-   if (adev->pm.funcs->powergate_vce) {
+   mutex_unlock(&adev->pm.mutex);
+   } else {
+   if (enable) {
mutex_lock(&adev->pm.mutex);
-   amdgpu_dpm_powergate_vce(adev, !enable);
+   adev->pm.dpm.vce_active = true;
+   /* XXX select vce level based on ring/task */
+   adev->pm.dpm.vce_level = AMDGPU_VCE_LEVEL_AC_ALL;
mutex_unlock(&adev->pm.mutex);
} else {
-   if (enable) {
-   mutex_lock(&adev->pm.mutex);
-   adev->pm.dpm.vce_active = true;
-   /* XXX select vce level based on ring/task */
-   adev->pm.dpm.vce_level = 
AMDGPU_VCE_LEVEL_AC_ALL;
-   mutex_unlock(&adev->pm.mutex);
-   } else {
-   mutex_lock(&adev->pm.mutex);
-   adev->pm.dpm.vce_active = false;
-   mutex_unlock(&adev->pm.mutex);
-   }
-   amdgpu_pm_compute_clocks(adev);
+   mutex_lock(&adev->pm.mutex);
+   adev->pm.dpm.vce_active = false;
+   mutex_unlock(&adev->pm.mutex);
}
+   amdgpu_pm_compute_clocks(adev);
}
 }
 
-- 
2.9.2

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH 08/14] drm/amd/powerplay: move clockgating to after ungating power in pp for uvd/vce

2016-07-28 Thread Tom St Denis
Cannot set clockgating state before ungating power.

Signed-off-by: Tom St Denis 
---
 drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c | 14 +++---
 1 file changed, 7 insertions(+), 7 deletions(-)

diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c 
b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c
index 2da548f6337e..2028980f1ed4 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c
@@ -177,12 +177,12 @@ int cz_dpm_powergate_uvd(struct pp_hwmgr *hwmgr, bool 
bgate)
cz_dpm_powerdown_uvd(hwmgr);
} else {
cz_dpm_powerup_uvd(hwmgr);
-   cgs_set_clockgating_state(hwmgr->device,
-   AMD_IP_BLOCK_TYPE_UVD,
-   AMD_PG_STATE_GATE);
cgs_set_powergating_state(hwmgr->device,
AMD_IP_BLOCK_TYPE_UVD,
AMD_CG_STATE_UNGATE);
+   cgs_set_clockgating_state(hwmgr->device,
+   AMD_IP_BLOCK_TYPE_UVD,
+   AMD_PG_STATE_GATE);
cz_dpm_update_uvd_dpm(hwmgr, false);
}
 
@@ -211,14 +211,14 @@ int cz_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool 
bgate)
} else {
cz_dpm_powerup_vce(hwmgr);
cz_hwmgr->vce_power_gated = false;
-   cgs_set_clockgating_state(
-   hwmgr->device,
-   AMD_IP_BLOCK_TYPE_VCE,
-   AMD_PG_STATE_GATE);
cgs_set_powergating_state(
hwmgr->device,
AMD_IP_BLOCK_TYPE_VCE,
AMD_CG_STATE_UNGATE);
+   cgs_set_clockgating_state(
+   hwmgr->device,
+   AMD_IP_BLOCK_TYPE_VCE,
+   AMD_PG_STATE_GATE);
cz_dpm_update_vce_dpm(hwmgr);
cz_enable_disable_vce_dpm(hwmgr, true);
return 0;
-- 
2.9.2

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH 03/14] drm/amd/amdgpu: don't set clockgating in uvd_v6_0_start()

2016-07-28 Thread Tom St Denis
This is handled properly by both DPM and PP externally.

Signed-off-by: Tom St Denis 
---
 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c | 13 -
 1 file changed, 4 insertions(+), 9 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c 
b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
index 4fa50918e886..4dbd5ab29bba 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -396,15 +396,10 @@ static int uvd_v6_0_start(struct amdgpu_device *adev)
 
uvd_v6_0_mc_resume(adev);
 
-   /* Set dynamic clock gating in S/W control mode */
-   if (adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG) {
-   uvd_v6_0_set_sw_clock_gating(adev);
-   } else {
-   /* disable clock gating */
-   uint32_t data = RREG32(mmUVD_CGC_CTRL);
-   data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
-   WREG32(mmUVD_CGC_CTRL, data);
-   }
+   /* disable clock gating */
+   tmp = RREG32(mmUVD_CGC_CTRL);
+   tmp &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
+   WREG32(mmUVD_CGC_CTRL, tmp);
 
/* disable interupt */
WREG32_P(mmUVD_MASTINT_EN, 0, ~UVD_MASTINT_EN__VCPU_EN_MASK);
-- 
2.9.2

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH 06/14] drm/amd/amdgpu: Add error messages to UVD PG in DPM

2016-07-28 Thread Tom St Denis
Signed-off-by: Tom St Denis 
---
 drivers/gpu/drm/amd/amdgpu/cz_dpm.c | 41 +++--
 1 file changed, 35 insertions(+), 6 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c 
b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
index 8ba07e79d4cb..301d0b98e607 100644
--- a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
@@ -2108,29 +2108,58 @@ static void cz_dpm_powergate_uvd(struct amdgpu_device 
*adev, bool gate)
/* disable clockgating so we can properly shut down the 
block */
ret = amdgpu_set_clockgating_state(adev, 
AMD_IP_BLOCK_TYPE_UVD,

AMD_CG_STATE_UNGATE);
+   if (ret) {
+   DRM_ERROR("UVD DPM Power Gating failed to set 
clockgating state\n");
+   return;
+   }
+
/* shutdown the UVD block */
ret = amdgpu_set_powergating_state(adev, 
AMD_IP_BLOCK_TYPE_UVD,
AMD_PG_STATE_GATE);
-   /* XXX: check for errors */
+
+   if (ret) {
+   DRM_ERROR("UVD DPM Power Gating failed to set 
powergating state\n");
+   return;
+   }
}
cz_update_uvd_dpm(adev, gate);
-   if (pi->caps_uvd_pg)
+   if (pi->caps_uvd_pg) {
/* power off the UVD block */
-   cz_send_msg_to_smc(adev, PPSMC_MSG_UVDPowerOFF);
+   ret = cz_send_msg_to_smc(adev, PPSMC_MSG_UVDPowerOFF);
+   if (ret) {
+   DRM_ERROR("UVD DPM Power Gating failed to send 
SMU PowerOFF message\n");
+   return;
+   }
+   }
} else {
if (pi->caps_uvd_pg) {
/* power on the UVD block */
if (pi->uvd_dynamic_pg)
-   cz_send_msg_to_smc_with_parameter(adev, 
PPSMC_MSG_UVDPowerON, 1);
+   ret = cz_send_msg_to_smc_with_parameter(adev, 
PPSMC_MSG_UVDPowerON, 1);
else
-   cz_send_msg_to_smc_with_parameter(adev, 
PPSMC_MSG_UVDPowerON, 0);
+   ret = cz_send_msg_to_smc_with_parameter(adev, 
PPSMC_MSG_UVDPowerON, 0);
+
+   if (ret) {
+   DRM_ERROR("UVD DPM Power Gating Failed to send 
SMU PowerON message\n");
+   return;
+   }
+
/* re-init the UVD block */
ret = amdgpu_set_powergating_state(adev, 
AMD_IP_BLOCK_TYPE_UVD,

AMD_PG_STATE_UNGATE);
+
+   if (ret) {
+   DRM_ERROR("UVD DPM Power Gating Failed to set 
powergating state\n");
+   return;
+   }
+
/* enable clockgating. hw will dynamically gate/ungate 
clocks on the fly */
ret = amdgpu_set_clockgating_state(adev, 
AMD_IP_BLOCK_TYPE_UVD,
AMD_CG_STATE_GATE);
-   /* XXX: check for errors */
+   if (ret) {
+   DRM_ERROR("UVD DPM Power Gating Failed to set 
clockgating state\n");
+   return;
+   }
}
cz_update_uvd_dpm(adev, gate);
}
-- 
2.9.2

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH 13/14] drm/amd/amdgpu: Enable stoney UVD PG

2016-07-28 Thread Tom St Denis
Signed-off-by: Tom St Denis 
---
 drivers/gpu/drm/amd/amdgpu/vi.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index 1f4cf6d3808e..26d15a446ad0 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -1607,7 +1607,8 @@ static int vi_common_early_init(void *handle)
AMD_CG_SUPPORT_SDMA_LS;
adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG |
AMD_PG_SUPPORT_GFX_SMG |
-   AMD_PG_SUPPORT_GFX_PIPELINE;
+   AMD_PG_SUPPORT_GFX_PIPELINE |
+   AMD_PG_SUPPORT_UVD;
adev->external_rev_id = adev->rev_id + 0x1;
break;
default:
-- 
2.9.2

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH 12/14] drm/amd/amdgpu: Enable stoney GFX PG

2016-07-28 Thread Tom St Denis
Signed-off-by: Tom St Denis 
---
 drivers/gpu/drm/amd/amdgpu/vi.c | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index 550fafb01aba..1f4cf6d3808e 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -1605,6 +1605,9 @@ static int vi_common_early_init(void *handle)
AMD_CG_SUPPORT_HDP_LS |
AMD_CG_SUPPORT_SDMA_MGCG |
AMD_CG_SUPPORT_SDMA_LS;
+   adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG |
+   AMD_PG_SUPPORT_GFX_SMG |
+   AMD_PG_SUPPORT_GFX_PIPELINE;
adev->external_rev_id = adev->rev_id + 0x1;
break;
default:
-- 
2.9.2

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH 14/14] drm/amd/amdgpu: Enable stoney VCE PG

2016-07-28 Thread Tom St Denis
Signed-off-by: Tom St Denis 
---
 drivers/gpu/drm/amd/amdgpu/vi.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index 26d15a446ad0..33bad99176fe 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -1608,7 +1608,8 @@ static int vi_common_early_init(void *handle)
adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG |
AMD_PG_SUPPORT_GFX_SMG |
AMD_PG_SUPPORT_GFX_PIPELINE |
-   AMD_PG_SUPPORT_UVD;
+   AMD_PG_SUPPORT_UVD |
+   AMD_PG_SUPPORT_VCE;
adev->external_rev_id = adev->rev_id + 0x1;
break;
default:
-- 
2.9.2

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH 10/14] drm/amd/amdgpu: Enable carrizo UVD PG

2016-07-28 Thread Tom St Denis
Signed-off-by: Tom St Denis 
---
 drivers/gpu/drm/amd/amdgpu/vi.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index 4fa9fea541a5..f271ee0cc033 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -1583,7 +1583,8 @@ static int vi_common_early_init(void *handle)
if (adev->rev_id != 0x00) {
adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG |
AMD_PG_SUPPORT_GFX_SMG |
-   AMD_PG_SUPPORT_GFX_PIPELINE;
+   AMD_PG_SUPPORT_GFX_PIPELINE |
+   AMD_PG_SUPPORT_UVD;
}
adev->external_rev_id = adev->rev_id + 0x1;
break;
-- 
2.9.2

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH 07/14] drm/amd/powerplay: remove enable_clock_power_gatings_tasks from initialize and resume events

2016-07-28 Thread Tom St Denis
Setting the PG state this early would cause lockups in the IP block
initialization functions.

Signed-off-by: Tom St Denis 
---
 drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c | 2 --
 1 file changed, 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c 
b/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c
index d6635cc4b0fc..635fc4b48184 100644
--- a/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c
+++ b/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c
@@ -30,7 +30,6 @@ static const pem_event_action * const initialize_event[] = {
system_config_tasks,
setup_asic_tasks,
enable_dynamic_state_management_tasks,
-   enable_clock_power_gatings_tasks,
get_2d_performance_state_tasks,
set_performance_state_tasks,
initialize_thermal_controller_tasks,
@@ -140,7 +139,6 @@ static const pem_event_action * const resume_event[] = {
setup_asic_tasks,
enable_stutter_mode_tasks, /*must do this in boot state and before SMC 
is started */
enable_dynamic_state_management_tasks,
-   enable_clock_power_gatings_tasks,
enable_disable_bapm_tasks,
initialize_thermal_controller_tasks,
get_2d_performance_state_tasks,
-- 
2.9.2

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH 11/14] drm/amd/amdgpu: Enable carrizo VCE PG

2016-07-28 Thread Tom St Denis
Signed-off-by: Tom St Denis 
---
 drivers/gpu/drm/amd/amdgpu/vi.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index f271ee0cc033..550fafb01aba 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -1584,7 +1584,8 @@ static int vi_common_early_init(void *handle)
adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG |
AMD_PG_SUPPORT_GFX_SMG |
AMD_PG_SUPPORT_GFX_PIPELINE |
-   AMD_PG_SUPPORT_UVD;
+   AMD_PG_SUPPORT_UVD |
+   AMD_PG_SUPPORT_VCE;
}
adev->external_rev_id = adev->rev_id + 0x1;
break;
-- 
2.9.2

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH 05/14] drm/amd/amdgpu: enable PG_EN bit in powergating UVD

2016-07-28 Thread Tom St Denis
Enable the PG_EN bit just before the SMU would be tasked
with the PG transition.

Signed-off-by: Tom St Denis 
---
 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c 
b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
index 7f2b5de29f67..391457f1eafd 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -997,6 +997,8 @@ static int uvd_v6_0_set_powergating_state(void *handle,
if (!(adev->pg_flags & AMD_PG_SUPPORT_UVD))
return 0;
 
+   WREG32(mmUVD_POWER_STATUS, UVD_POWER_STATUS__UVD_PG_EN_MASK);
+
if (state == AMD_PG_STATE_GATE) {
uvd_v6_0_stop(adev);
return 0;
-- 
2.9.2

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH 04/14] drm/amd/amdgpu: don't track state in UVD clockgating

2016-07-28 Thread Tom St Denis
There's no need to track CG state anymore.

Signed-off-by: Tom St Denis 
---
 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c | 14 --
 1 file changed, 4 insertions(+), 10 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c 
b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
index 4dbd5ab29bba..7f2b5de29f67 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -959,21 +959,15 @@ static int uvd_v6_0_set_clockgating_state(void *handle,
  enum amd_clockgating_state state)
 {
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-   bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
-   static int curstate = -1;
-
-   if (adev->asic_type == CHIP_FIJI ||
-   adev->asic_type == CHIP_POLARIS10)
-   uvd_v6_set_bypass_mode(adev, enable);
 
if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG))
return 0;
 
-   if (curstate == state)
-   return 0;
+   if (adev->asic_type == CHIP_FIJI ||
+   adev->asic_type == CHIP_POLARIS10)
+   uvd_v6_set_bypass_mode(adev, state == AMD_CG_STATE_GATE ? true 
: false);
 
-   curstate = state;
-   if (enable) {
+   if (state == AMD_CG_STATE_GATE) {
/* disable HW gating and enable Sw gating */
uvd_v6_0_set_sw_clock_gating(adev);
} else {
-- 
2.9.2

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH 09/14] drm/amd/amdgpu: Enable carrizo GFX PG

2016-07-28 Thread Tom St Denis
Signed-off-by: Tom St Denis 
---
 drivers/gpu/drm/amd/amdgpu/vi.c | 6 ++
 1 file changed, 6 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index 9ba64989f092..4fa9fea541a5 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -1578,7 +1578,13 @@ static int vi_common_early_init(void *handle)
AMD_CG_SUPPORT_HDP_LS |
AMD_CG_SUPPORT_SDMA_MGCG |
AMD_CG_SUPPORT_SDMA_LS;
+   /* rev0 hardware requires workarounds to support PG */
adev->pg_flags = 0;
+   if (adev->rev_id != 0x00) {
+   adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG |
+   AMD_PG_SUPPORT_GFX_SMG |
+   AMD_PG_SUPPORT_GFX_PIPELINE;
+   }
adev->external_rev_id = adev->rev_id + 0x1;
break;
case CHIP_STONEY:
-- 
2.9.2

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH 02/14] drm/amd/amdgpu: add pm lock to debugfs mmio entry

2016-07-28 Thread Tom St Denis
Adds support for PM locks around access to registers that might
have race conditions on PG transitions.

Signed-off-by: Tom St Denis 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 14 --
 1 file changed, 12 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index eab931a58d06..1fe8ef626407 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -2335,22 +2335,26 @@ static ssize_t amdgpu_debugfs_regs_read(struct file *f, 
char __user *buf,
struct amdgpu_device *adev = f->f_inode->i_private;
ssize_t result = 0;
int r;
-   bool use_bank;
+   bool pm_pg_lock, use_bank;
unsigned instance_bank, sh_bank, se_bank;
 
if (size & 0x3 || *pos & 0x3)
return -EINVAL;
 
+   /* are we reading registers for which a PG lock is necessary? */
+   pm_pg_lock = (*pos >> 23) & 1;
+
if (*pos & (1ULL << 62)) {
se_bank = (*pos >> 24) & 0x3FF;
sh_bank = (*pos >> 34) & 0x3FF;
instance_bank = (*pos >> 44) & 0x3FF;
use_bank = 1;
-   *pos &= 0xFF;
} else {
use_bank = 0;
}
 
+   *pos &= 0x3;
+
if (use_bank) {
if (sh_bank >= adev->gfx.config.max_sh_per_se ||
se_bank >= adev->gfx.config.max_shader_engines)
@@ -2360,6 +2364,9 @@ static ssize_t amdgpu_debugfs_regs_read(struct file *f, 
char __user *buf,
sh_bank, instance_bank);
}
 
+   if (pm_pg_lock)
+   mutex_lock(&adev->pm.mutex);
+
while (size) {
uint32_t value;
 
@@ -2385,6 +2392,9 @@ end:
mutex_unlock(&adev->grbm_idx_mutex);
}
 
+   if (pm_pg_lock)
+   mutex_unlock(&adev->pm.mutex);
+
return result;
 }
 
-- 
2.9.2

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


答复: 答复: 答复: 答复: 答复: [PATCH] drm/amdgpu: adjust gtt memory size

2016-07-28 Thread Wang, Ken
Hi Christian,

   Patches has been updated and sent for reviewing in another thread, please 
take a look.


发件人: Christian König 
发送时间: 2016年7月22日 21:02:56
收件人: Wang, Ken; Kuehling, Felix; amd-gfx@lists.freedesktop.org
主题: Re: 答复: 答复: 答复: 答复: [PATCH] drm/amdgpu: adjust gtt memory size

Yeah sounds good to me.

Could we make a global ttm function returning how much system memory can be 
used at the same time?

This way we could make those values depend on each other, e.g. when somebody 
modifies the TTM value we automatically change the GART size as well.

Regards,
Christian.

Am 22.07.2016 um 08:06 schrieb Wang, Ken:

thanks Felix,

   I see that logci in TTM as well, so choose 1/2 of system memory as up limit 
seems good enough here? if no object I will fix the overflow issue and send the 
review again.


发件人: amd-gfx 

 代表 Felix Kuehling 
发送时间: 2016年7月22日 3:19:38
收件人: amd-gfx@lists.freedesktop.org
主题: Re: 答复: 答复: 答复: [PATCH] drm/amdgpu: adjust gtt memory size


TTM limits system memory allocations to 1/2 system memory for regular 
processes, or 3/4 for privileged processes.


For compute we opted to make the GART bigger than system memory to allow for 
fragmentation. However, we argued that you don't really need GART for most 
buffers anyways, since they are never accessed in VMID 0. They need to be 
mapped in some VM page table, but never in the GART table. So I think the 
long-term goal should be to remove the need to allocate GART addresses for 
system memory BOs. Then you could get away with a quite small GART size.


Regards,

  Felix

On 16-07-21 08:04 AM, Christian König wrote:

we can add people in if necessary here. so the initial though of this change is 
with latest big memory GPU released, the remote memory goes very large, like 
Polaris10.

According to the windows WDDM, remote memory size is half of the system memory, 
I don't know what was discussed before, Please let me know if there're use case 
indeed need that big of remote memory.

Well that is at least something that makes sense.

Previous discussions where always settled around making GART bigger than VRAM 
and not the other way around.

Anyway I think a prerequisite to this is actually allowing half of system 
memory to be allocated for GART and I'm not sure if that is currently the case. 
TTM has a limitation for that as well, but I off hand don't remember how high 
much it was.

I think we should use the same value which TTM uses for the upper limit here.

Regards,
Christian.

Am 21.07.2016 um 13:56 schrieb Wang, Qingqing:

alright, I didn't realize you're not in that thread.

we can add people in if necessary here. so the initial though of this change is 
with latest big memory GPU released, the remote memory goes very large, like 
Polaris10.

According to the windows WDDM, remote memory size is half of the system memory, 
I don't know what was discussed before, Please let me know if there're use case 
indeed need that big of remote memory.


发件人: Christian König 
发送时间: 2016年7月21日 19:48:17
收件人: Wang, Qingqing; 
amd-gfx@lists.freedesktop.org
主题: Re: 答复: 答复: [PATCH] drm/amdgpu: adjust gtt memory size

Am 21.07.2016 um 13:15 schrieb Wang, Qingqing:

Actually that discussion was held long ago internally but we never got a 
conclusion on this IIRC. So we should probably continue the discussion on this 
thread now.

-there is a thread for this topic yesterday, some people are not in amd-gfx 
mail list, we'd better discuss in the internal thread.

No, exactly for this reason we have the public mailing list. I for example 
wasn't part of the internal thread either.

Christian.


发件人: Christian König 
发送时间: 2016年7月21日 17:48:41
收件人: Wang, Qingqing; 
amd-gfx@lists.freedesktop.org
主题: Re: 答复: [PATCH] drm/amdgpu: adjust gtt memory size

Am 21.07.2016 um 11:16 schrieb Wang, Qingqing:

This doesn't work on 32bit systems since totalram is a long (IIRC) and
so this will certainly overflow.

-- can you be more specific, how could the overflow happen?

Both values are 32bit on a 32bit system, so as soon as you have more than 4GB 
installed this will overflow.

You need to cast the long to a 64bit value to avoid that.



Additional if I remember correctly we didn't ended the discussion on
what to do here with a conclusion.

-- ok, since it happens in anther mail list, we should stop talking about it 
here, you can send out your ideas there.

Actually that discussion was held long ago internally but we never got a 
conclusion on this IIRC. So we should probably continue the discussion on this 
thread now.

Regards,
Christian.


发件人: amd-gfx 

[PATCH 1/2] drm/amdgpu: fix default UVD context size

2016-07-28 Thread Leo Liu
From: Christian König 

Context buffers should be denied by default, not allowed.

Signed-off-by: Christian König 
Reviewed-by: Leo Liu 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index b39238a..28c1b62 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -410,7 +410,7 @@ static int amdgpu_uvd_cs_msg_decode(struct amdgpu_device 
*adev, uint32_t *msg,
unsigned fs_in_mb = width_in_mb * height_in_mb;
 
unsigned image_size, tmp, min_dpb_size, num_dpb_buffer;
-   unsigned min_ctx_size = 0;
+   unsigned min_ctx_size = ~0;
 
image_size = width * height;
image_size += image_size / 2;
-- 
2.7.4

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH 2/2] drm/amdgpu: enable UVD context buffer for older HW

2016-07-28 Thread Leo Liu
From: Christian König 

Supported starting on certain FW versions.

Signed-off-by: Christian König 
Reviewed-by: Leo Liu 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu.h |  1 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 28 ++--
 2 files changed, 27 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index d406ec7..9c07e38 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -1688,6 +1688,7 @@ struct amdgpu_uvd {
struct amdgpu_ring  ring;
struct amdgpu_irq_src   irq;
booladdress_64_bit;
+   booluse_ctx_buf;
struct amd_sched_entity entity;
uint32_tsrbm_soft_reset;
 };
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index 28c1b62..c22b64e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -41,8 +41,15 @@
 
 /* 1 second timeout */
 #define UVD_IDLE_TIMEOUT   msecs_to_jiffies(1000)
+
+/* Firmware versions for VI */
+#define FW_1_65_10 ((1 << 24) | (65 << 16) | (10 << 8))
+#define FW_1_87_11 ((1 << 24) | (87 << 16) | (11 << 8))
+#define FW_1_87_12 ((1 << 24) | (87 << 16) | (12 << 8))
+#define FW_1_37_15 ((1 << 24) | (37 << 16) | (15 << 8))
+
 /* Polaris10/11 firmware version */
-#define FW_1_66_16 ((1 << 24) | (66 << 16) | (16 << 8))
+#define FW_1_66_16 ((1 << 24) | (66 << 16) | (16 << 8))
 
 /* Firmware Names */
 #ifdef CONFIG_DRM_AMDGPU_CIK
@@ -220,6 +227,23 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
if (!amdgpu_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_UVD, 5, 0))
adev->uvd.address_64_bit = true;
 
+   switch (adev->asic_type) {
+   case CHIP_TONGA:
+   adev->uvd.use_ctx_buf = adev->uvd.fw_version >= FW_1_65_10;
+   break;
+   case CHIP_CARRIZO:
+   adev->uvd.use_ctx_buf = adev->uvd.fw_version >= FW_1_87_11;
+   break;
+   case CHIP_FIJI:
+   adev->uvd.use_ctx_buf = adev->uvd.fw_version >= FW_1_87_12;
+   break;
+   case CHIP_STONEY:
+   adev->uvd.use_ctx_buf = adev->uvd.fw_version >= FW_1_37_15;
+   break;
+   default:
+   adev->uvd.use_ctx_buf = adev->asic_type >= CHIP_POLARIS10;
+   }
+
return 0;
 }
 
@@ -529,7 +553,7 @@ static int amdgpu_uvd_cs_msg_decode(struct amdgpu_device 
*adev, uint32_t *msg,
/* reference picture buffer */
min_dpb_size = image_size * num_dpb_buffer;
 
-   if (adev->asic_type < CHIP_POLARIS10){
+   if (!adev->uvd.use_ctx_buf){
/* macroblock context buffer */
min_dpb_size +=
width_in_mb * height_in_mb * num_dpb_buffer * 
192;
-- 
2.7.4

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH 4/4] drm/amd/powerplay: not change uvd/vce block's state when initialize/resume

2016-07-28 Thread StDenis, Tom
Well I'll submit my patches momentarily and if Rex hasn't pushed his by then 
I'll push mine.


Tom



From: Deucher, Alexander
Sent: Thursday, July 28, 2016 10:17
To: StDenis, Tom; Zhu, Rex; amd-gfx@lists.freedesktop.org
Subject: RE: [PATCH 4/4] drm/amd/powerplay: not change uvd/vce block's state 
when initialize/resume


Whoever commits it first wins ;)



From: StDenis, Tom
Sent: Thursday, July 28, 2016 10:13 AM
To: Deucher, Alexander; Zhu, Rex; amd-gfx@lists.freedesktop.org
Subject: Re: [PATCH 4/4] drm/amd/powerplay: not change uvd/vce block's state 
when initialize/resume



Um, I had this in my worktree ... so uh do I drop it from mine or what?



Tom





From: amd-gfx 
mailto:amd-gfx-boun...@lists.freedesktop.org>>
 on behalf of Deucher, Alexander 
mailto:alexander.deuc...@amd.com>>
Sent: Thursday, July 28, 2016 09:48
To: Zhu, Rex; 
amd-gfx@lists.freedesktop.org
Cc: Zhu, Rex
Subject: RE: [PATCH 4/4] drm/amd/powerplay: not change uvd/vce block's state 
when initialize/resume



> -Original Message-
> From: amd-gfx [mailto:amd-gfx-boun...@lists.freedesktop.org] On Behalf
> Of Rex Zhu
> Sent: Thursday, July 28, 2016 5:53 AM
> To: amd-gfx@lists.freedesktop.org
> Cc: Zhu, Rex
> Subject: [PATCH 4/4] drm/amd/powerplay: not change uvd/vce block's state
> when initialize/resume
>
> Change-Id: I6e338a5faeb023b13bb450ecb1c4bb3eaa3b0ac5
> Signed-off-by: Rex Zhu mailto:rex@amd.com>>

For the series:
Reviewed-by: Alex Deucher 
mailto:alexander.deuc...@amd.com>>

> ---
>  drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c | 2 --
>  1 file changed, 2 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c
> b/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c
> index d6635cc..635fc4b 100644
> --- a/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c
> +++ b/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c
> @@ -30,7 +30,6 @@ static const pem_event_action * const initialize_event[]
> = {
>system_config_tasks,
>setup_asic_tasks,
>enable_dynamic_state_management_tasks,
> - enable_clock_power_gatings_tasks,
>get_2d_performance_state_tasks,
>set_performance_state_tasks,
>initialize_thermal_controller_tasks,
> @@ -140,7 +139,6 @@ static const pem_event_action * const
> resume_event[] = {
>setup_asic_tasks,
>enable_stutter_mode_tasks, /*must do this in boot state and before
> SMC is started */
>enable_dynamic_state_management_tasks,
> - enable_clock_power_gatings_tasks,
>enable_disable_bapm_tasks,
>initialize_thermal_controller_tasks,
>get_2d_performance_state_tasks,
> --
> 1.9.1
>
> ___
> amd-gfx mailing list
> amd-gfx@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/amd-gfx
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH] drm/amdgpu/powerplay: partial revert of endian fixes

2016-07-28 Thread Arnd Bergmann
On Thursday, July 28, 2016 10:00:46 AM CEST Alex Deucher wrote:
> This fixes a warning on big endian. Bitfields need to
> be handled properly.
> 
> Cc: Arnd Bergmann 
> Signed-off-by: Alex Deucher 
> 

Acked-by: Arnd Bergmann 

Thanks!
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


RE: [PATCH 2/2] drm/amdgpu: enable UVD context buffer for older HW

2016-07-28 Thread Deucher, Alexander
> -Original Message-
> From: amd-gfx [mailto:amd-gfx-boun...@lists.freedesktop.org] On Behalf
> Of Leo Liu
> Sent: Thursday, July 28, 2016 10:13 AM
> To: amd-gfx@lists.freedesktop.org
> Cc: Koenig, Christian; dri-de...@lists.freedesktop.org
> Subject: [PATCH 2/2] drm/amdgpu: enable UVD context buffer for older HW
> 
> From: Christian König 
> 
> Supported starting on certain FW versions.
> 
> Signed-off-by: Christian König 
> Reviewed-by: Leo Liu 

Series is:
Reviewed-by: Alex Deucher 

> ---
>  drivers/gpu/drm/amd/amdgpu/amdgpu.h |  1 +
>  drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 28
> ++--
>  2 files changed, 27 insertions(+), 2 deletions(-)
> 
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
> b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
> index d406ec7..9c07e38 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
> @@ -1688,6 +1688,7 @@ struct amdgpu_uvd {
>   struct amdgpu_ring  ring;
>   struct amdgpu_irq_src   irq;
>   booladdress_64_bit;
> + booluse_ctx_buf;
>   struct amd_sched_entity entity;
>   uint32_tsrbm_soft_reset;
>  };
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
> b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
> index 28c1b62..c22b64e 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
> @@ -41,8 +41,15 @@
> 
>  /* 1 second timeout */
>  #define UVD_IDLE_TIMEOUT msecs_to_jiffies(1000)
> +
> +/* Firmware versions for VI */
> +#define FW_1_65_10   ((1 << 24) | (65 << 16) | (10 << 8))
> +#define FW_1_87_11   ((1 << 24) | (87 << 16) | (11 << 8))
> +#define FW_1_87_12   ((1 << 24) | (87 << 16) | (12 << 8))
> +#define FW_1_37_15   ((1 << 24) | (37 << 16) | (15 << 8))
> +
>  /* Polaris10/11 firmware version */
> -#define FW_1_66_16 ((1 << 24) | (66 << 16) | (16 << 8))
> +#define FW_1_66_16   ((1 << 24) | (66 << 16) | (16 << 8))
> 
>  /* Firmware Names */
>  #ifdef CONFIG_DRM_AMDGPU_CIK
> @@ -220,6 +227,23 @@ int amdgpu_uvd_sw_init(struct amdgpu_device
> *adev)
>   if (!amdgpu_ip_block_version_cmp(adev,
> AMD_IP_BLOCK_TYPE_UVD, 5, 0))
>   adev->uvd.address_64_bit = true;
> 
> + switch (adev->asic_type) {
> + case CHIP_TONGA:
> + adev->uvd.use_ctx_buf = adev->uvd.fw_version >=
> FW_1_65_10;
> + break;
> + case CHIP_CARRIZO:
> + adev->uvd.use_ctx_buf = adev->uvd.fw_version >=
> FW_1_87_11;
> + break;
> + case CHIP_FIJI:
> + adev->uvd.use_ctx_buf = adev->uvd.fw_version >=
> FW_1_87_12;
> + break;
> + case CHIP_STONEY:
> + adev->uvd.use_ctx_buf = adev->uvd.fw_version >=
> FW_1_37_15;
> + break;
> + default:
> + adev->uvd.use_ctx_buf = adev->asic_type >=
> CHIP_POLARIS10;
> + }
> +
>   return 0;
>  }
> 
> @@ -529,7 +553,7 @@ static int amdgpu_uvd_cs_msg_decode(struct
> amdgpu_device *adev, uint32_t *msg,
>   /* reference picture buffer */
>   min_dpb_size = image_size * num_dpb_buffer;
> 
> - if (adev->asic_type < CHIP_POLARIS10){
> + if (!adev->uvd.use_ctx_buf){
>   /* macroblock context buffer */
>   min_dpb_size +=
>   width_in_mb * height_in_mb *
> num_dpb_buffer * 192;
> --
> 2.7.4
> 
> ___
> amd-gfx mailing list
> amd-gfx@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/amd-gfx
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


RE: ATPX changes in drm-next-4.8 and D3cold handling

2016-07-28 Thread Deucher, Alexander
> -Original Message-
> From: Peter Wu [mailto:pe...@lekensteyn.nl]
> Sent: Thursday, July 21, 2016 6:43 AM
> To: Deucher, Alexander
> Cc: amd-gfx@lists.freedesktop.org; Zhang, Hawking; Koenig, Christian; dri-
> de...@lists.freedesktop.org; Christoph Haag
> Subject: ATPX changes in drm-next-4.8 and D3cold handling
> 
> Hi Alex,
> 
> There are a couple of changes for 4.8 that try to detect whether the
> "power_cntl" flag is present. Originally attributed to a firmware bug,
> it seems that the detection is performed too late resulting in flags
> that are always zero
> (https://bugzilla.kernel.org/show_bug.cgi?id=115321).  What PX platform
> are these patches tested with, did they have the same issue?
> 
> 
> In case you missed it, Dave's D3cold patches were succeeded by changes
> in PCI core. Relevant commits in the pci/pm branch:
> 
> 006d44e PCI: Add runtime PM support for PCIe ports
> 16468c7 ACPI / hotplug / PCI: Runtime resume bridge before rescan
> d963f65 PCI: Power on bridges before scanning new devices
> 9d26d3a PCI: Put PCIe ports into D3 during suspend
> 43f7f88 PCI: Don't clear d3cold_allowed for PCIe ports

Did those get merged yet?

> 
> With these changes, the nouveau driver had to disable use of the _DSM
> ACPI method (comparable to ATPX), otherwise both interfaces are used
> which could cause issues like being unable to resume the device.
> Also note that pcieport currently only handles D3cold for devices with a
> BIOS date in 2015 (or newer), you need to detect this with an approach
> like http://www.spinics.net/lists/linux-pci/msg52602.html
> 

My latest PX patches should handle this correctly.  We have flags in the ATPX 
interface to know what sort of system we are.  See:
https://cgit.freedesktop.org/~agd5f/linux/commit/?h=drm-next-4.8&id=b8c9fd5ad4b478ec1a5482177833e1a7082e48bd
https://cgit.freedesktop.org/~agd5f/linux/commit/?h=drm-next-4.8&id=31764c1e3b2bd6e9c8eaea1318a215afb6a8bad9
https://cgit.freedesktop.org/~agd5f/linux/commit/?h=drm-next-4.8&id=8491999285a3e5a5395ac87098bb1f26c465b62b

I just need to revert this commit once the d3cold patches land:
https://cgit.freedesktop.org/~agd5f/linux/commit/?h=drm-next-4.8&id=bdfb76040068d960cb9e226876be8a508d741c4a


> We also found that the Nvidia HDMI audio device (function 1) would
> prevent the pcieport from sleeping. For modern Nvidia hardware this is
> apparently not an issue because these somehow hide the audio device, but
> it might be an issue for AMD hardware. See also
> https://lists.freedesktop.org/archives/dri-devel/2016-July/112759.html

Thanks for the heads up.

Alex

> --
> Kind regards,
> Peter Wu
> https://lekensteyn.nl
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: ATPX changes in drm-next-4.8 and D3cold handling

2016-07-28 Thread Lukas Wunner
On Thu, Jul 28, 2016 at 03:33:25PM +, Deucher, Alexander wrote:
> > From: Peter Wu [mailto:pe...@lekensteyn.nl]
> > Sent: Thursday, July 21, 2016 6:43 AM
> > In case you missed it, Dave's D3cold patches were succeeded by changes
> > in PCI core. Relevant commits in the pci/pm branch:
> > 
> > 006d44e PCI: Add runtime PM support for PCIe ports
> > 16468c7 ACPI / hotplug / PCI: Runtime resume bridge before rescan
> > d963f65 PCI: Power on bridges before scanning new devices
> > 9d26d3a PCI: Put PCIe ports into D3 during suspend
> > 43f7f88 PCI: Don't clear d3cold_allowed for PCIe ports
> 
> Did those get merged yet?

They will go into 4.8. Should have gone into 4.7 already but were
dropped at the last minute.


> I just need to revert this commit once the d3cold patches land:
> https://cgit.freedesktop.org/~agd5f/linux/commit/?h=drm-next-4.8&id=bdfb76040068d960cb9e226876be8a508d741c4a

So you probably need to revert this now.

Best regards,

Lukas
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH 07/14] drm/amd/powerplay: remove enable_clock_power_gatings_tasks from initialize and resume events

2016-07-28 Thread Zhu, Rex

For the series:
Reviewed-by: Rex Zhu 


Best Regards

Rex


From: amd-gfx  on behalf of Tom St Denis 

Sent: Thursday, July 28, 2016 10:19:05 PM
To: amd-gfx@lists.freedesktop.org
Cc: StDenis, Tom
Subject: [PATCH 07/14] drm/amd/powerplay: remove 
enable_clock_power_gatings_tasks from initialize and resume events

Setting the PG state this early would cause lockups in the IP block
initialization functions.

Signed-off-by: Tom St Denis 
---
 drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c | 2 --
 1 file changed, 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c 
b/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c
index d6635cc4b0fc..635fc4b48184 100644
--- a/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c
+++ b/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c
@@ -30,7 +30,6 @@ static const pem_event_action * const initialize_event[] = {
 system_config_tasks,
 setup_asic_tasks,
 enable_dynamic_state_management_tasks,
-   enable_clock_power_gatings_tasks,
 get_2d_performance_state_tasks,
 set_performance_state_tasks,
 initialize_thermal_controller_tasks,
@@ -140,7 +139,6 @@ static const pem_event_action * const resume_event[] = {
 setup_asic_tasks,
 enable_stutter_mode_tasks, /*must do this in boot state and before SMC 
is started */
 enable_dynamic_state_management_tasks,
-   enable_clock_power_gatings_tasks,
 enable_disable_bapm_tasks,
 initialize_thermal_controller_tasks,
 get_2d_performance_state_tasks,
--
2.9.2

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx
amd-gfx Info Page - 
lists.freedesktop.org
lists.freedesktop.org
To see the collection of prior postings to the list, visit the amd-gfx 
Archives. Using amd-gfx: To post a message to all the list members, send email 
...


___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH] drm/amdgpu/powerplay: enable powerplay by default on TOPAZ

2016-07-28 Thread Alex Deucher
Now that the implementation is complete.

Signed-off-by: Alex Deucher 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
index 260da02..57aa342 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
@@ -106,15 +106,13 @@ static int amdgpu_pp_early_init(void *handle)
break;
case CHIP_TONGA:
case CHIP_FIJI:
+   case CHIP_TOPAZ:
adev->pp_enabled = (amdgpu_powerplay == 0) ? false : true;
break;
case CHIP_CARRIZO:
case CHIP_STONEY:
adev->pp_enabled = (amdgpu_powerplay > 0) ? true : false;
break;
-   case CHIP_TOPAZ:
-   adev->pp_enabled = (amdgpu_powerplay > 0) ? true : false;
-   break;
/* These chips don't have powerplay implemenations */
case CHIP_BONAIRE:
case CHIP_HAWAII:
-- 
2.5.5

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: GFX/UVD/VCE PG (v3)

2016-07-28 Thread Alex Deucher
On Thu, Jul 28, 2016 at 10:18 AM, Tom St Denis  wrote:
> In this respin I've factored out the changes into multiple patches
> followed by adding fixes for powerplay (don't gate on init/resume)
> as well as reverse the order of the operations when powering up
> (set the clock last).
>
> Finally, this series adds VCE PG tested on both the Carrizo and Stoney
> systems I have.

For the series:
Reviewed-by: Alex Deucher 
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH 2/2] drm/radeon: init atpx at switcheroo register time

2016-07-28 Thread Peter Wu
On Wed, Jul 27, 2016 at 04:10:45PM -0400, Alex Deucher wrote:
> If we do it at enable time, it's too late for the feature
> checks.
> 
> bug: https://bugzilla.kernel.org/show_bug.cgi?id=115321
> Signed-off-by: Alex Deucher 
> Cc: Peter Wu 
> ---
>  drivers/gpu/drm/radeon/radeon_atpx_handler.c | 3 ++-
>  1 file changed, 2 insertions(+), 1 deletion(-)
> 
> diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c 
> b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
> index 86dcdf3..b46b4c4 100644
> --- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c
> +++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
> @@ -536,7 +536,7 @@ static int radeon_atpx_get_client_id(struct pci_dev *pdev)
>  static const struct vga_switcheroo_handler radeon_atpx_handler = {
>   .switchto = radeon_atpx_switchto,
>   .power_state = radeon_atpx_power_state,
> - .init = radeon_atpx_init,
> + .init = NULL,

Let's just remove this initialization, it allows for trivial removal of
the init member later. radeon was the only user of this anyway.

>   .get_client_id = radeon_atpx_get_client_id,
>  };
>  
> @@ -572,6 +572,7 @@ static bool radeon_atpx_detect(void)
>   printk(KERN_INFO "vga_switcheroo: detected switching method %s 
> handle\n",
>  acpi_method_name);
>   radeon_atpx_priv.atpx_detected = true;
> + radeon_atpx_init();
>   return true;
>   }
>   return false;
> -- 
> 2.5.5
> 

Other than the comment above, both patches are
Reviewed-by: Peter Wu 

I'll be testing this (via your amd-staging-4.6 branch) tomorrow on an
AMD/AMD hybrid (GCN 1.0 unfortunately, so restricted to radeon).
-- 
Kind regards,
Peter Wu
https://lekensteyn.nl
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: ATPX changes in drm-next-4.8 and D3cold handling

2016-07-28 Thread Peter Wu
On Thu, Jul 28, 2016 at 05:40:31PM +0200, Lukas Wunner wrote:
> On Thu, Jul 28, 2016 at 03:33:25PM +, Deucher, Alexander wrote:
> > > From: Peter Wu [mailto:pe...@lekensteyn.nl]
> > > Sent: Thursday, July 21, 2016 6:43 AM
> > > In case you missed it, Dave's D3cold patches were succeeded by changes
> > > in PCI core. Relevant commits in the pci/pm branch:
> > > 
> > > 006d44e PCI: Add runtime PM support for PCIe ports
> > > 16468c7 ACPI / hotplug / PCI: Runtime resume bridge before rescan
> > > d963f65 PCI: Power on bridges before scanning new devices
> > > 9d26d3a PCI: Put PCIe ports into D3 during suspend
> > > 43f7f88 PCI: Don't clear d3cold_allowed for PCIe ports
> > 
> > Did those get merged yet?
> 
> They will go into 4.8. Should have gone into 4.7 already but were
> dropped at the last minute.
> 
> 
> > I just need to revert this commit once the d3cold patches land:
> > https://cgit.freedesktop.org/~agd5f/linux/commit/?h=drm-next-4.8&id=bdfb76040068d960cb9e226876be8a508d741c4a
> 
> So you probably need to revert this now.
> 
> Best regards,
> Lukas

It is better to revert it before the PCI/PM patches get merged,
otherwise you risk that the device is already put in D3 before the
bridge tries to do it again. This is currently happening with nouveau on
-next.

Does this AMD hardware exist on systems with BIOSes from before 2015? Currently
the D3cold work in the PCI/PM branch only enables the D3cold handling via the
bridge when the BIOS date is >= 2015.
-- 
Kind regards,
Peter Wu
https://lekensteyn.nl
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH] drm/amdgpu/powerplay: enable powerplay by default on TOPAZ

2016-07-28 Thread Huang Rui
On Thu, Jul 28, 2016 at 01:40:02PM -0400, Alex Deucher wrote:
> Now that the implementation is complete.
> 
> Signed-off-by: Alex Deucher 

Acked-by: Huang Rui 

+ Alvin and Ken for awareness.

Thanks,
Rui

> ---
>  drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c | 4 +---
>  1 file changed, 1 insertion(+), 3 deletions(-)
> 
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c 
> b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
> index 260da02..57aa342 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
> @@ -106,15 +106,13 @@ static int amdgpu_pp_early_init(void *handle)
>   break;
>   case CHIP_TONGA:
>   case CHIP_FIJI:
> + case CHIP_TOPAZ:
>   adev->pp_enabled = (amdgpu_powerplay == 0) ? false : true;
>   break;
>   case CHIP_CARRIZO:
>   case CHIP_STONEY:
>   adev->pp_enabled = (amdgpu_powerplay > 0) ? true : false;
>   break;
> - case CHIP_TOPAZ:
> - adev->pp_enabled = (amdgpu_powerplay > 0) ? true : false;
> - break;
>   /* These chips don't have powerplay implemenations */
>   case CHIP_BONAIRE:
>   case CHIP_HAWAII:
> -- 
> 2.5.5
> 
> ___
> amd-gfx mailing list
> amd-gfx@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/amd-gfx
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH v2 2/2] drm/amdgpu: expose AMDGPU_GEM_CREATE_VRAM_CLEARED to user space

2016-07-28 Thread Flora Cui
ping...

On Mon, Jul 25, 2016 at 02:15:24PM +0800, Flora Cui wrote:
> V2: fix the return value for fill failure and validate bo before
> filling data
> 
> Change-Id: I256178afa18c1a433fe60d8656d1c5cc5d55cf2f
> Signed-off-by: Flora Cui 
> Reviewed-by: Chunming Zhou 
> ---
>  drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 33 
> ++
>  include/uapi/drm/amdgpu_drm.h  |  2 ++
>  2 files changed, 35 insertions(+)
> 
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c 
> b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
> index 6f0873c..59f3a75 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
> @@ -277,11 +277,44 @@ int amdgpu_bo_create_restricted(struct amdgpu_device 
> *adev,
>   if (unlikely(r != 0)) {
>   return r;
>   }
> +
> + if (flags & AMDGPU_GEM_CREATE_VRAM_CLEARED &&
> + bo->tbo.mem.placement & TTM_PL_FLAG_VRAM) {
> + struct fence *fence;
> +
> + if (adev->mman.buffer_funcs_ring == NULL ||
> +!adev->mman.buffer_funcs_ring->ready) {
> + r = -EBUSY;
> + goto fail_free;
> + }
> +
> + r = amdgpu_bo_reserve(bo, false);
> + if (unlikely(r != 0))
> + goto fail_free;
> +
> + amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM);
> + r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
> + if (unlikely(r != 0))
> + goto fail_unreserve;
> +
> + amdgpu_fill_buffer(bo, 0, bo->tbo.resv, &fence);
> + amdgpu_bo_fence(bo, fence, false);
> + amdgpu_bo_unreserve(bo);
> + fence_put(bo->tbo.moving);
> + bo->tbo.moving = fence_get(fence);
> + fence_put(fence);
> + }
>   *bo_ptr = bo;
>  
>   trace_amdgpu_bo_create(bo);
>  
>   return 0;
> +
> +fail_unreserve:
> + amdgpu_bo_unreserve(bo);
> +fail_free:
> + amdgpu_bo_unref(&bo);
> + return r;
>  }
>  
>  int amdgpu_bo_create(struct amdgpu_device *adev,
> diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h
> index 946f238..8df3816 100644
> --- a/include/uapi/drm/amdgpu_drm.h
> +++ b/include/uapi/drm/amdgpu_drm.h
> @@ -75,6 +75,8 @@
>  #define AMDGPU_GEM_CREATE_NO_CPU_ACCESS  (1 << 1)
>  /* Flag that USWC attributes should be used for GTT */
>  #define AMDGPU_GEM_CREATE_CPU_GTT_USWC   (1 << 2)
> +/* Flag that the memory should be in VRAM and cleared */
> +#define AMDGPU_GEM_CREATE_VRAM_CLEARED   (1 << 3)
>  
>  struct drm_amdgpu_gem_create_in  {
>   /** the requested memory size */
> -- 
> 1.9.1
> 
> ___
> amd-gfx mailing list
> amd-gfx@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/amd-gfx
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx