[WHAT]
1. is_pwrseq0 needs to check link before accessing link->link_index.
2. context is checked before accessing its bw_ctx.dml2.
3. clk_mgr_base->bw_params is checked before accessing
   clk_table.num_entries_per_clk (in both the dcn32 and dcn401 clk_mgr).

This fixes 4 REVERSE_INULL issues reported by Coverity.

Reviewed-by: Rodrigo Siqueira <rodrigo.sique...@amd.com>
Acked-by: Alex Hung <alex.h...@amd.com>
Signed-off-by: Alex Hung <alex.h...@amd.com>
---
 .../drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c   | 10 ++++++----
 .../drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c | 10 ++++++----
 drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c   |  2 +-
 drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c     |  4 +++-
 4 files changed, 16 insertions(+), 10 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c 
b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
index 06f0c41ad6f1..3b10b24f5e23 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
@@ -163,9 +163,14 @@ void dcn32_init_clocks(struct clk_mgr *clk_mgr_base)
 {
        struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
        unsigned int num_levels;
-       struct clk_limit_num_entries *num_entries_per_clk = 
&clk_mgr_base->bw_params->clk_table.num_entries_per_clk;
+       struct clk_limit_num_entries *num_entries_per_clk;
        unsigned int i;
 
+       if (!clk_mgr_base->bw_params)
+               return;
+
+       num_entries_per_clk = 
&clk_mgr_base->bw_params->clk_table.num_entries_per_clk;
+
        memset(&(clk_mgr_base->clks), 0, sizeof(struct dc_clocks));
        clk_mgr_base->clks.p_state_change_support = true;
        clk_mgr_base->clks.prev_p_state_change_support = true;
@@ -173,9 +178,6 @@ void dcn32_init_clocks(struct clk_mgr *clk_mgr_base)
        clk_mgr->smu_present = false;
        clk_mgr->dpm_present = false;
 
-       if (!clk_mgr_base->bw_params)
-               return;
-
        if (!clk_mgr_base->force_smu_not_present && 
dcn30_smu_get_smu_version(clk_mgr, &clk_mgr->smu_ver))
                clk_mgr->smu_present = true;
 
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c 
b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c
index 70f06a7c882e..606b2411eee9 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c
@@ -207,9 +207,14 @@ static void dcn401_build_wm_range_table(struct clk_mgr 
*clk_mgr)
 void dcn401_init_clocks(struct clk_mgr *clk_mgr_base)
 {
        struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
-       struct clk_limit_num_entries *num_entries_per_clk = 
&clk_mgr_base->bw_params->clk_table.num_entries_per_clk;
+       struct clk_limit_num_entries *num_entries_per_clk;
        unsigned int i;
 
+       if (!clk_mgr_base->bw_params)
+               return;
+
+       num_entries_per_clk = 
&clk_mgr_base->bw_params->clk_table.num_entries_per_clk;
+
        memset(&(clk_mgr_base->clks), 0, sizeof(struct dc_clocks));
        clk_mgr_base->clks.p_state_change_support = true;
        clk_mgr_base->clks.prev_p_state_change_support = true;
@@ -217,9 +222,6 @@ void dcn401_init_clocks(struct clk_mgr *clk_mgr_base)
        clk_mgr->smu_present = false;
        clk_mgr->dpm_present = false;
 
-       if (!clk_mgr_base->bw_params)
-               return;
-
        if (!clk_mgr_base->force_smu_not_present && 
dcn30_smu_get_smu_version(clk_mgr, &clk_mgr->smu_ver))
                clk_mgr->smu_present = true;
 
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c 
b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
index 54dd7e164635..8a8efe408a9d 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
@@ -1084,7 +1084,7 @@ static enum dcn_zstate_support_state  
decide_zstate_support(struct dc *dc, struc
                struct dc_stream_status *stream_status = 
&context->stream_status[0];
                int minmum_z8_residency = dc->debug.minimum_z8_residency_time > 
0 ? dc->debug.minimum_z8_residency_time : 1000;
                bool allow_z8 = context->bw_ctx.dml.vba.StutterPeriod > 
(double)minmum_z8_residency;
-               bool is_pwrseq0 = link->link_index == 0;
+               bool is_pwrseq0 = (link && link->link_index == 0);
                bool is_psr = (link && (link->psr_settings.psr_version == 
DC_PSR_VERSION_1 ||
                                                link->psr_settings.psr_version 
== DC_PSR_VERSION_SU_1) && !link->panel_config.psr.disable_psr);
                bool is_replay = link && 
link->replay_settings.replay_feature_enabled;
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c 
b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c
index f4038ac2e476..90bb6e718301 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c
@@ -687,12 +687,14 @@ static bool dml2_validate_and_build_resource(const struct 
dc *in_dc, struct dc_s
 
 static bool dml2_validate_only(struct dc_state *context)
 {
-       struct dml2_context *dml2 = context->bw_ctx.dml2;
+       struct dml2_context *dml2;
        unsigned int result = 0;
 
        if (!context || context->stream_count == 0)
                return true;
 
+       dml2 = context->bw_ctx.dml2;
+
        /* Zero out before each call before proceeding */
        memset(&dml2->v20.scratch, 0, sizeof(struct dml2_wrapper_scratch));
        memset(&dml2->v20.dml_core_ctx.policy, 0, sizeof(struct 
dml_mode_eval_policy_st));
-- 
2.34.1

Reply via email to