Currently, the amd_pstate driver only checks whether the lcore_id is within the RTE_MAX_LCORE range, but does not verify whether the core is actually active or managed by the application. This validation is insufficient.
This patch adds an lcore role check to the cpufreq-related APIs. Although service cores do not typically invoke these APIs, they may operate in polling states where power management is required. To maintain compatibility with applications using service cores, the validation logic now explicitly allows both ROLE_RTE and ROLE_SERVICE. Signed-off-by: Huisong Li <[email protected]> --- drivers/power/amd_pstate/amd_pstate_cpufreq.c | 56 +++++++++---------- 1 file changed, 28 insertions(+), 28 deletions(-) diff --git a/drivers/power/amd_pstate/amd_pstate_cpufreq.c b/drivers/power/amd_pstate/amd_pstate_cpufreq.c index 95495bff7d..bca148dc8d 100644 --- a/drivers/power/amd_pstate/amd_pstate_cpufreq.c +++ b/drivers/power/amd_pstate/amd_pstate_cpufreq.c @@ -360,9 +360,8 @@ power_amd_pstate_cpufreq_init(unsigned int lcore_id) return -1; } - if (lcore_id >= RTE_MAX_LCORE) { - POWER_LOG(ERR, "Lcore id %u can not exceeds %u", - lcore_id, RTE_MAX_LCORE - 1U); + if (!rte_lcore_is_eal_managed(lcore_id)) { + POWER_LOG(ERR, "lcore id %u is invalid.", lcore_id); return -1; } @@ -443,11 +442,11 @@ power_amd_pstate_cpufreq_exit(unsigned int lcore_id) struct amd_pstate_power_info *pi; uint32_t exp_state; - if (lcore_id >= RTE_MAX_LCORE) { - POWER_LOG(ERR, "Lcore id %u can not exceeds %u", - lcore_id, RTE_MAX_LCORE - 1U); + if (!rte_lcore_is_eal_managed(lcore_id)) { + POWER_LOG(ERR, "lcore id %u is invalid.", lcore_id); return -1; } + pi = &lcore_power_info[lcore_id]; exp_state = POWER_USED; /* The power in use state works as a guard variable between @@ -493,8 +492,8 @@ power_amd_pstate_cpufreq_freqs(unsigned int lcore_id, uint32_t *freqs, uint32_t { struct amd_pstate_power_info *pi; - if (lcore_id >= RTE_MAX_LCORE) { - POWER_LOG(ERR, "Invalid lcore ID"); + if (!rte_lcore_is_eal_managed(lcore_id)) { + POWER_LOG(ERR, "lcore id %u is invalid.", lcore_id); return 0; } @@ -516,8 +515,8 @@ power_amd_pstate_cpufreq_freqs(unsigned int lcore_id, uint32_t *freqs, uint32_t uint32_t 
power_amd_pstate_cpufreq_get_freq(unsigned int lcore_id) { - if (lcore_id >= RTE_MAX_LCORE) { - POWER_LOG(ERR, "Invalid lcore ID"); + if (!rte_lcore_is_eal_managed(lcore_id)) { + POWER_LOG(ERR, "lcore id %u is invalid.", lcore_id); return RTE_POWER_INVALID_FREQ_INDEX; } @@ -527,8 +526,8 @@ power_amd_pstate_cpufreq_get_freq(unsigned int lcore_id) int power_amd_pstate_cpufreq_set_freq(unsigned int lcore_id, uint32_t index) { - if (lcore_id >= RTE_MAX_LCORE) { - POWER_LOG(ERR, "Invalid lcore ID"); + if (!rte_lcore_is_eal_managed(lcore_id)) { + POWER_LOG(ERR, "lcore id %u is invalid.", lcore_id); return -1; } @@ -540,8 +539,8 @@ power_amd_pstate_cpufreq_freq_down(unsigned int lcore_id) { struct amd_pstate_power_info *pi; - if (lcore_id >= RTE_MAX_LCORE) { - POWER_LOG(ERR, "Invalid lcore ID"); + if (!rte_lcore_is_eal_managed(lcore_id)) { + POWER_LOG(ERR, "lcore id %u is invalid.", lcore_id); return -1; } @@ -558,8 +557,8 @@ power_amd_pstate_cpufreq_freq_up(unsigned int lcore_id) { struct amd_pstate_power_info *pi; - if (lcore_id >= RTE_MAX_LCORE) { - POWER_LOG(ERR, "Invalid lcore ID"); + if (!rte_lcore_is_eal_managed(lcore_id)) { + POWER_LOG(ERR, "lcore id %u is invalid.", lcore_id); return -1; } @@ -575,8 +574,8 @@ power_amd_pstate_cpufreq_freq_up(unsigned int lcore_id) int power_amd_pstate_cpufreq_freq_max(unsigned int lcore_id) { - if (lcore_id >= RTE_MAX_LCORE) { - POWER_LOG(ERR, "Invalid lcore ID"); + if (!rte_lcore_is_eal_managed(lcore_id)) { + POWER_LOG(ERR, "lcore id %u is invalid.", lcore_id); return -1; } @@ -600,8 +599,8 @@ power_amd_pstate_cpufreq_freq_min(unsigned int lcore_id) { struct amd_pstate_power_info *pi; - if (lcore_id >= RTE_MAX_LCORE) { - POWER_LOG(ERR, "Invalid lcore ID"); + if (!rte_lcore_is_eal_managed(lcore_id)) { + POWER_LOG(ERR, "lcore id %u is invalid.", lcore_id); return -1; } @@ -616,8 +615,8 @@ power_amd_pstate_turbo_status(unsigned int lcore_id) { struct amd_pstate_power_info *pi; - if (lcore_id >= RTE_MAX_LCORE) { - POWER_LOG(ERR, 
"Invalid lcore ID"); + if (!rte_lcore_is_eal_managed(lcore_id)) { + POWER_LOG(ERR, "lcore id %u is invalid.", lcore_id); return -1; } @@ -631,8 +630,8 @@ power_amd_pstate_enable_turbo(unsigned int lcore_id) { struct amd_pstate_power_info *pi; - if (lcore_id >= RTE_MAX_LCORE) { - POWER_LOG(ERR, "Invalid lcore ID"); + if (!rte_lcore_is_eal_managed(lcore_id)) { + POWER_LOG(ERR, "lcore id %u is invalid.", lcore_id); return -1; } @@ -667,8 +666,8 @@ power_amd_pstate_disable_turbo(unsigned int lcore_id) { struct amd_pstate_power_info *pi; - if (lcore_id >= RTE_MAX_LCORE) { - POWER_LOG(ERR, "Invalid lcore ID"); + if (!rte_lcore_is_eal_managed(lcore_id)) { + POWER_LOG(ERR, "lcore id %u is invalid.", lcore_id); return -1; } @@ -695,10 +694,11 @@ power_amd_pstate_get_capabilities(unsigned int lcore_id, { struct amd_pstate_power_info *pi; - if (lcore_id >= RTE_MAX_LCORE) { - POWER_LOG(ERR, "Invalid lcore ID"); + if (!rte_lcore_is_eal_managed(lcore_id)) { + POWER_LOG(ERR, "lcore id %u is invalid.", lcore_id); return -1; } + if (caps == NULL) { POWER_LOG(ERR, "Invalid argument"); return -1; -- 2.33.0

