Allows you to force the selected performance level via sysfs.

Signed-off-by: Alex Deucher <alexander.deuc...@amd.com>
---
 drivers/gpu/drm/radeon/ci_dpm.c      | 153 +++++++++++++++++++++++++++++++++++
 drivers/gpu/drm/radeon/ppsmc.h       |   1 +
 drivers/gpu/drm/radeon/radeon_asic.c |   1 +
 drivers/gpu/drm/radeon/radeon_asic.h |   2 +
 4 files changed, 157 insertions(+)

diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c
index 28b2b36..9eebf1f 100644
--- a/drivers/gpu/drm/radeon/ci_dpm.c
+++ b/drivers/gpu/drm/radeon/ci_dpm.c
@@ -3601,6 +3601,153 @@ static int ci_generate_dpm_level_enable_mask(struct 
radeon_device *rdev,
        return 0;
 }
 
+/**
+ * ci_get_lowest_enabled_level - index of the lowest set bit in a dpm mask
+ * @rdev: radeon_device pointer (unused, kept for API symmetry)
+ * @level_mask: bitmask of enabled dpm levels (bit N == level N enabled)
+ *
+ * Returns the index of the lowest enabled level.  Callers are expected to
+ * pass a non-zero mask; guard against zero anyway, since an empty mask
+ * would otherwise loop forever and shift 1 past the width of u32
+ * (undefined behavior).
+ */
+static u32 ci_get_lowest_enabled_level(struct radeon_device *rdev,
+				       u32 level_mask)
+{
+	u32 level = 0;
+
+	if (level_mask == 0)
+		return 0;
+
+	while ((level_mask & (1 << level)) == 0)
+		level++;
+
+	return level;
+}
+
+
+/**
+ * ci_dpm_force_performance_level - force the dpm state high, low, or auto
+ * @rdev: radeon_device pointer
+ * @level: forced level (high/low/auto)
+ *
+ * HIGH forces each enabled dpm domain (sclk/mclk/pcie) to its highest
+ * enabled level and polls the SMC status registers (up to usec_timeout)
+ * until the switch is reflected.  LOW does the same with the lowest
+ * enabled level.  AUTO tells the SMC to stop forcing each domain.
+ * Returns 0 on success, negative error code on failure.
+ */
+int ci_dpm_force_performance_level(struct radeon_device *rdev,
+				   enum radeon_dpm_forced_level level)
+{
+	struct ci_power_info *pi = ci_get_pi(rdev);
+	PPSMC_Result smc_result;
+	u32 tmp, levels, i;
+	int ret;
+
+	if (level == RADEON_DPM_FORCED_LEVEL_HIGH) {
+		if ((!pi->sclk_dpm_key_disabled) &&
+		    pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
+			/* index of the highest enabled sclk level */
+			levels = 0;
+			tmp = pi->dpm_level_enable_mask.sclk_dpm_enable_mask;
+			while (tmp >>= 1)
+				levels++;
+			if (levels) {
+				ret = ci_dpm_force_state_sclk(rdev, levels);
+				if (ret)
+					return ret;
+				/* wait for the SMC to report the new level */
+				for (i = 0; i < rdev->usec_timeout; i++) {
+					tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) &
+					       CURR_SCLK_INDEX_MASK) >> CURR_SCLK_INDEX_SHIFT;
+					if (tmp == levels)
+						break;
+					udelay(1);
+				}
+			}
+		}
+		if ((!pi->mclk_dpm_key_disabled) &&
+		    pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
+			/* index of the highest enabled mclk level */
+			levels = 0;
+			tmp = pi->dpm_level_enable_mask.mclk_dpm_enable_mask;
+			while (tmp >>= 1)
+				levels++;
+			if (levels) {
+				ret = ci_dpm_force_state_mclk(rdev, levels);
+				if (ret)
+					return ret;
+				for (i = 0; i < rdev->usec_timeout; i++) {
+					tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) &
+					       CURR_MCLK_INDEX_MASK) >> CURR_MCLK_INDEX_SHIFT;
+					if (tmp == levels)
+						break;
+					udelay(1);
+				}
+			}
+		}
+		if ((!pi->pcie_dpm_key_disabled) &&
+		    pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
+			/* index of the highest enabled pcie level */
+			levels = 0;
+			tmp = pi->dpm_level_enable_mask.pcie_dpm_enable_mask;
+			while (tmp >>= 1)
+				levels++;
+			if (levels) {
+				/* Fix: pass the computed dpm level index
+				 * 'levels', not the forced-level enum
+				 * 'level', matching the sclk/mclk calls
+				 * above and the LOW branch below.
+				 */
+				ret = ci_dpm_force_state_pcie(rdev, levels);
+				if (ret)
+					return ret;
+				for (i = 0; i < rdev->usec_timeout; i++) {
+					tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX_1) &
+					       CURR_PCIE_INDEX_MASK) >> CURR_PCIE_INDEX_SHIFT;
+					if (tmp == levels)
+						break;
+					udelay(1);
+				}
+			}
+		}
+	} else if (level == RADEON_DPM_FORCED_LEVEL_LOW) {
+		if ((!pi->sclk_dpm_key_disabled) &&
+		    pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
+			levels = ci_get_lowest_enabled_level(rdev,
+							     pi->dpm_level_enable_mask.sclk_dpm_enable_mask);
+			ret = ci_dpm_force_state_sclk(rdev, levels);
+			if (ret)
+				return ret;
+			for (i = 0; i < rdev->usec_timeout; i++) {
+				tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) &
+				       CURR_SCLK_INDEX_MASK) >> CURR_SCLK_INDEX_SHIFT;
+				if (tmp == levels)
+					break;
+				udelay(1);
+			}
+		}
+		if ((!pi->mclk_dpm_key_disabled) &&
+		    pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
+			levels = ci_get_lowest_enabled_level(rdev,
+							     pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
+			ret = ci_dpm_force_state_mclk(rdev, levels);
+			if (ret)
+				return ret;
+			for (i = 0; i < rdev->usec_timeout; i++) {
+				tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) &
+				       CURR_MCLK_INDEX_MASK) >> CURR_MCLK_INDEX_SHIFT;
+				if (tmp == levels)
+					break;
+				udelay(1);
+			}
+		}
+		if ((!pi->pcie_dpm_key_disabled) &&
+		    pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
+			levels = ci_get_lowest_enabled_level(rdev,
+							     pi->dpm_level_enable_mask.pcie_dpm_enable_mask);
+			ret = ci_dpm_force_state_pcie(rdev, levels);
+			if (ret)
+				return ret;
+			for (i = 0; i < rdev->usec_timeout; i++) {
+				tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX_1) &
+				       CURR_PCIE_INDEX_MASK) >> CURR_PCIE_INDEX_SHIFT;
+				if (tmp == levels)
+					break;
+				udelay(1);
+			}
+		}
+	} else if (level == RADEON_DPM_FORCED_LEVEL_AUTO) {
+		/* un-force each domain the key has not disabled */
+		if (!pi->sclk_dpm_key_disabled) {
+			smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_NoForcedLevel);
+			if (smc_result != PPSMC_Result_OK)
+				return -EINVAL;
+		}
+		if (!pi->mclk_dpm_key_disabled) {
+			smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_MCLKDPM_NoForcedLevel);
+			if (smc_result != PPSMC_Result_OK)
+				return -EINVAL;
+		}
+		if (!pi->pcie_dpm_key_disabled) {
+			smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_PCIeDPM_UnForceLevel);
+			if (smc_result != PPSMC_Result_OK)
+				return -EINVAL;
+		}
+	}
+
+	rdev->pm.dpm.forced_level = level;
+
+	return 0;
+}
+
 static int ci_set_mc_special_registers(struct radeon_device *rdev,
                                       struct ci_mc_reg_table *table)
 {
@@ -4548,6 +4695,12 @@ int ci_dpm_set_power_state(struct radeon_device *rdev)
        if (pi->pcie_performance_request)
                ci_notify_link_speed_change_after_state_change(rdev, new_ps, 
old_ps);
 
+       ret = ci_dpm_force_performance_level(rdev, 
RADEON_DPM_FORCED_LEVEL_AUTO);
+       if (ret) {
+               DRM_ERROR("ci_dpm_force_performance_level failed\n");
+               return ret;
+       }
+
        return 0;
 }
 
diff --git a/drivers/gpu/drm/radeon/ppsmc.h b/drivers/gpu/drm/radeon/ppsmc.h
index 4c1ee6d..6828428 100644
--- a/drivers/gpu/drm/radeon/ppsmc.h
+++ b/drivers/gpu/drm/radeon/ppsmc.h
@@ -119,6 +119,7 @@ typedef uint8_t PPSMC_Result;
 #define PPSMC_MSG_SCLKDPM_SetEnabledMask      ((uint16_t) 0x145)
 #define PPSMC_MSG_MCLKDPM_SetEnabledMask      ((uint16_t) 0x146)
 #define PPSMC_MSG_PCIeDPM_ForceLevel          ((uint16_t) 0x147)
+#define PPSMC_MSG_PCIeDPM_UnForceLevel        ((uint16_t) 0x148)
 #define PPSMC_MSG_EnableVRHotGPIOInterrupt    ((uint16_t) 0x14a)
 #define PPSMC_MSG_DPM_Enable                  ((uint16_t) 0x14e)
 #define PPSMC_MSG_DPM_Disable                 ((uint16_t) 0x14f)
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c 
b/drivers/gpu/drm/radeon/radeon_asic.c
index e28f08b..705bc74 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -2468,6 +2468,7 @@ static struct radeon_asic ci_asic = {
                .get_mclk = &ci_dpm_get_mclk,
                .print_power_state = &ci_dpm_print_power_state,
                .debugfs_print_current_performance_level = 
&ci_dpm_debugfs_print_current_performance_level,
+               .force_performance_level = &ci_dpm_force_performance_level,
        },
        .pflip = {
                .pre_page_flip = &evergreen_pre_page_flip,
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h 
b/drivers/gpu/drm/radeon/radeon_asic.h
index 371a6f2..a887928 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -765,6 +765,8 @@ void ci_dpm_print_power_state(struct radeon_device *rdev,
                              struct radeon_ps *ps);
 void ci_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
                                                    struct seq_file *m);
+int ci_dpm_force_performance_level(struct radeon_device *rdev,
+                                  enum radeon_dpm_forced_level level);
 
 int kv_dpm_init(struct radeon_device *rdev);
 int kv_dpm_enable(struct radeon_device *rdev);
-- 
1.8.3.1

_______________________________________________
dri-devel mailing list
dri-devel@lists.freedesktop.org
http://lists.freedesktop.org/mailman/listinfo/dri-devel

Reply via email to