Fix the initialization and usage of SMU v13.0.6 capability values. Use
caps_set/clear helper functions to set/clear capabilities.

Also, fix the SET_UCLK_MAX capability on APUs; it is supported on APUs.

Signed-off-by: Lijo Lazar <lijo.la...@amd.com>

Fixes: 9bb53d2ce109 ("drm/amd/pm: Add capability flags for SMU v13.0.6")
---
v1: ("drm/amd/pm: Use correct macros for smu caps")
v2:
        Use caps_set/clear instead of macros (Alex). Commit message changed.
        Use BIT_ULL (Kevin)
        Fix SET_UCLK_MAX capability on APUs
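Reviewer note (not part of the commit message): below is a minimal
standalone sketch of the BIT_ULL-style capability helpers this patch
switches to. All demo_* names are illustrative stand-ins rather than the
driver's; the real helpers are in the diff that follows.

/* Userspace illustration of a 64-bit capability bitmask: each capability
 * is an enum index, and helpers set/clear/test the matching bit, mirroring
 * the kernel's BIT_ULL() usage in the driver change below.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DEMO_BIT_ULL(n) (1ULL << (n))	/* stand-in for the kernel's BIT_ULL() */

enum demo_caps {
	DEMO_CAPS_DPM,
	DEMO_CAPS_SET_UCLK_MAX,
	DEMO_CAPS_PCIE_METRICS,
	DEMO_CAPS_ALL,
};

struct demo_ctx {
	uint64_t caps;		/* one bit per enum value, as in dpm_context->caps */
};

static inline void demo_caps_set(struct demo_ctx *ctx, enum demo_caps cap)
{
	ctx->caps |= DEMO_BIT_ULL(cap);
}

static inline void demo_caps_clear(struct demo_ctx *ctx, enum demo_caps cap)
{
	ctx->caps &= ~DEMO_BIT_ULL(cap);
}

static inline bool demo_caps_supported(const struct demo_ctx *ctx,
				       enum demo_caps cap)
{
	return !!(ctx->caps & DEMO_BIT_ULL(cap));
}

int main(void)
{
	struct demo_ctx ctx = { 0 };

	demo_caps_set(&ctx, DEMO_CAPS_DPM);
	demo_caps_set(&ctx, DEMO_CAPS_SET_UCLK_MAX);
	demo_caps_clear(&ctx, DEMO_CAPS_SET_UCLK_MAX);	/* e.g. not supported on this SKU */

	printf("DPM: %d, SET_UCLK_MAX: %d\n",
	       demo_caps_supported(&ctx, DEMO_CAPS_DPM),
	       demo_caps_supported(&ctx, DEMO_CAPS_SET_UCLK_MAX));
	return 0;
}

The !! in the supported helper keeps the return value a plain bool, which
matches the idiom used by smu_v13_0_6_caps_supported() in the diff below.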

 .../drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c  | 124 ++++++++++--------
 1 file changed, 72 insertions(+), 52 deletions(-)

diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
index 56e26fcd3066..9e64392d23a9 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
@@ -118,8 +118,7 @@ enum smu_v13_0_6_caps {
        SMU_13_0_6_CAPS_ALL,
 };
 
-#define SMU_CAPS_MASK(x) (ULL(1) << x)
-#define SMU_CAPS(x) SMU_CAPS_MASK(SMU_13_0_6_CAPS_##x)
+#define SMU_CAPS(x) SMU_13_0_6_CAPS_##x
 
 struct mca_bank_ipid {
        enum amdgpu_mca_ip ip;
@@ -284,95 +283,116 @@ struct smu_v13_0_6_dpm_map {
        uint32_t *freq_table;
 };
 
-static void smu_v13_0_14_init_caps(struct smu_context *smu)
+static inline void smu_v13_0_6_caps_set(struct smu_context *smu,
+                                       enum smu_v13_0_6_caps caps)
+{
+       struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
+
+       dpm_context->caps |= BIT_ULL(caps);
+}
+
+static inline void smu_v13_0_6_caps_clear(struct smu_context *smu,
+                                         enum smu_v13_0_6_caps caps)
 {
        struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
-       uint64_t caps = SMU_CAPS(DPM) | SMU_CAPS(UNI_METRICS) |
-                       SMU_CAPS(SET_UCLK_MAX) | SMU_CAPS(DPM_POLICY) |
-                       SMU_CAPS(PCIE_METRICS) | SMU_CAPS(CTF_LIMIT) |
-                       SMU_CAPS(MCA_DEBUG_MODE) | SMU_CAPS(RMA_MSG) |
-                       SMU_CAPS(ACA_SYND);
+
+       dpm_context->caps &= ~BIT_ULL(caps);
+}
+
+static inline bool smu_v13_0_6_caps_supported(struct smu_context *smu,
+                                             enum smu_v13_0_6_caps caps)
+{
+       struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
+
+       return !!(dpm_context->caps & BIT_ULL(caps));
+}
+
+static void smu_v13_0_14_init_caps(struct smu_context *smu)
+{
+       enum smu_v13_0_6_caps default_caps_list[] = { SMU_CAPS(DPM),
+                                                     SMU_CAPS(UNI_METRICS),
+                                                     SMU_CAPS(SET_UCLK_MAX),
+                                                     SMU_CAPS(DPM_POLICY),
+                                                     SMU_CAPS(PCIE_METRICS),
+                                                     SMU_CAPS(CTF_LIMIT),
+                                                     SMU_CAPS(MCA_DEBUG_MODE),
+                                                     SMU_CAPS(RMA_MSG),
+                                                     SMU_CAPS(ACA_SYND) };
        uint32_t fw_ver = smu->smc_fw_version;
 
+       for (int i = 0; i < ARRAY_SIZE(default_caps_list); i++)
+               smu_v13_0_6_caps_set(smu, default_caps_list[i]);
+
        if (fw_ver >= 0x05550E00)
-               caps |= SMU_CAPS(OTHER_END_METRICS);
+               smu_v13_0_6_caps_set(smu, SMU_CAPS(OTHER_END_METRICS));
        if (fw_ver >= 0x05551000)
-               caps |= SMU_CAPS(HST_LIMIT_METRICS);
+               smu_v13_0_6_caps_set(smu, SMU_CAPS(HST_LIMIT_METRICS));
        if (fw_ver >= 0x05550B00)
-               caps |= SMU_CAPS(PER_INST_METRICS);
+               smu_v13_0_6_caps_set(smu, SMU_CAPS(PER_INST_METRICS));
        if (fw_ver > 0x05550f00)
-               caps |= SMU_CAPS(SDMA_RESET);
-
-       dpm_context->caps = caps;
+               smu_v13_0_6_caps_set(smu, SMU_CAPS(SDMA_RESET));
 }
 
 static void smu_v13_0_6_init_caps(struct smu_context *smu)
 {
-       uint64_t caps = SMU_CAPS(DPM) | SMU_CAPS(UNI_METRICS) |
-                       SMU_CAPS(SET_UCLK_MAX) | SMU_CAPS(DPM_POLICY) |
-                       SMU_CAPS(PCIE_METRICS) | SMU_CAPS(MCA_DEBUG_MODE) |
-                       SMU_CAPS(CTF_LIMIT) | SMU_CAPS(RMA_MSG) |
-                       SMU_CAPS(ACA_SYND);
-       struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
+       enum smu_v13_0_6_caps default_caps_list[] = { SMU_CAPS(DPM),
+                                                     SMU_CAPS(UNI_METRICS),
+                                                     SMU_CAPS(SET_UCLK_MAX),
+                                                     SMU_CAPS(DPM_POLICY),
+                                                     SMU_CAPS(PCIE_METRICS),
+                                                     SMU_CAPS(CTF_LIMIT),
+                                                     SMU_CAPS(MCA_DEBUG_MODE),
+                                                     SMU_CAPS(RMA_MSG),
+                                                     SMU_CAPS(ACA_SYND) };
        struct amdgpu_device *adev = smu->adev;
        uint32_t fw_ver = smu->smc_fw_version;
        uint32_t pgm = (fw_ver >> 24) & 0xFF;
 
+       for (int i = 0; i < ARRAY_SIZE(default_caps_list); i++)
+               smu_v13_0_6_caps_set(smu, default_caps_list[i]);
        if (fw_ver < 0x552F00)
-               caps &= ~SMU_CAPS(DPM);
+               smu_v13_0_6_caps_clear(smu, SMU_CAPS(DPM));
 
        if (adev->flags & AMD_IS_APU) {
-               caps &= ~SMU_CAPS(PCIE_METRICS);
-               caps &= ~SMU_CAPS(SET_UCLK_MAX);
-               caps &= ~SMU_CAPS(DPM_POLICY);
-               caps &= ~SMU_CAPS(RMA_MSG);
-               caps &= ~SMU_CAPS(ACA_SYND);
+               smu_v13_0_6_caps_clear(smu, SMU_CAPS(PCIE_METRICS));
+               smu_v13_0_6_caps_clear(smu, SMU_CAPS(DPM_POLICY));
+               smu_v13_0_6_caps_clear(smu, SMU_CAPS(RMA_MSG));
+               smu_v13_0_6_caps_clear(smu, SMU_CAPS(ACA_SYND));
 
                if (fw_ver <= 0x4556900)
-                       caps &= ~SMU_CAPS(UNI_METRICS);
-
+                       smu_v13_0_6_caps_clear(smu, SMU_CAPS(UNI_METRICS));
                if (fw_ver >= 0x04556F00)
-                       caps |= SMU_CAPS(HST_LIMIT_METRICS);
+                       smu_v13_0_6_caps_set(smu, SMU_CAPS(HST_LIMIT_METRICS));
                if (fw_ver >= 0x04556A00)
-                       caps |= SMU_CAPS(PER_INST_METRICS);
+                       smu_v13_0_6_caps_set(smu, SMU_CAPS(PER_INST_METRICS));
                if (fw_ver < 0x554500)
-                       caps &= ~SMU_CAPS(CTF_LIMIT);
+                       smu_v13_0_6_caps_clear(smu, SMU_CAPS(CTF_LIMIT));
        } else {
                if (fw_ver >= 0x557600)
-                       caps |= SMU_CAPS(OTHER_END_METRICS);
+                       smu_v13_0_6_caps_set(smu, SMU_CAPS(OTHER_END_METRICS));
                if (fw_ver < 0x00556000)
-                       caps &= ~SMU_CAPS(DPM_POLICY);
+                       smu_v13_0_6_caps_clear(smu, SMU_CAPS(DPM_POLICY));
                if (amdgpu_sriov_vf(adev) && (fw_ver < 0x556600))
-                       caps &= ~SMU_CAPS(SET_UCLK_MAX);
+                       smu_v13_0_6_caps_clear(smu, SMU_CAPS(SET_UCLK_MAX));
                if (fw_ver < 0x556300)
-                       caps &= ~SMU_CAPS(PCIE_METRICS);
+                       smu_v13_0_6_caps_clear(smu, SMU_CAPS(PCIE_METRICS));
                if (fw_ver < 0x554800)
-                       caps &= ~SMU_CAPS(MCA_DEBUG_MODE);
+                       smu_v13_0_6_caps_clear(smu, SMU_CAPS(MCA_DEBUG_MODE));
                if (fw_ver >= 0x556F00)
-                       caps |= SMU_CAPS(PER_INST_METRICS);
+                       smu_v13_0_6_caps_set(smu, SMU_CAPS(PER_INST_METRICS));
                if (fw_ver < 0x554500)
-                       caps &= ~SMU_CAPS(CTF_LIMIT);
+                       smu_v13_0_6_caps_clear(smu, SMU_CAPS(CTF_LIMIT));
                if (fw_ver < 0x00555a00)
-                       caps &= ~SMU_CAPS(RMA_MSG);
+                       smu_v13_0_6_caps_clear(smu, SMU_CAPS(RMA_MSG));
                if (fw_ver < 0x00555600)
-                       caps &= ~SMU_CAPS(ACA_SYND);
+                       smu_v13_0_6_caps_clear(smu, SMU_CAPS(ACA_SYND));
                if (pgm == 0 && fw_ver >= 0x557900)
-                       caps |= SMU_CAPS(HST_LIMIT_METRICS);
+                       smu_v13_0_6_caps_set(smu, SMU_CAPS(HST_LIMIT_METRICS));
        }
        if (((pgm == 7) && (fw_ver > 0x07550700)) ||
            ((pgm == 0) && (fw_ver > 0x00557700)) ||
            ((pgm == 4) && (fw_ver > 0x4556e6c)))
-               caps |= SMU_CAPS(SDMA_RESET);
-
-       dpm_context->caps = caps;
-}
-
-static inline bool smu_v13_0_6_caps_supported(struct smu_context *smu,
-                                             enum smu_v13_0_6_caps caps)
-{
-       struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
-
-       return (dpm_context->caps & SMU_CAPS_MASK(caps)) == SMU_CAPS_MASK(caps);
+               smu_v13_0_6_caps_set(smu, SMU_CAPS(SDMA_RESET));
 }
 
 static void smu_v13_0_x_init_caps(struct smu_context *smu)
-- 
2.25.1
