For interfaces like GPU metrics, the driver returns a formatted structure based on IP version. Add a separate data structure for such tables which also tracks the cache intervals.
Signed-off-by: Lijo Lazar <[email protected]>
---
 drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h | 88 +++++++++++++++++++
 1 file changed, 88 insertions(+)

diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
index 8815fc70b63b..23ab75b705b6 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
@@ -269,6 +269,16 @@ struct smu_table {
 	struct smu_table_cache cache;
 };
 
+enum smu_driver_table_id {
+	SMU_DRIVER_TABLE_GPU_METRICS = 0,
+	SMU_DRIVER_TABLE_COUNT,
+};
+
+struct smu_driver_table {
+	enum smu_driver_table_id id;
+	struct smu_table_cache cache;
+};
+
 enum smu_perf_level_designation {
 	PERF_LEVEL_ACTIVITY,
 	PERF_LEVEL_POWER_CONTAINMENT,
@@ -373,6 +383,8 @@ struct smu_table_context {
 
 	uint32_t gpu_metrics_table_size;
 	void *gpu_metrics_table;
+
+	struct smu_driver_table driver_tables[SMU_DRIVER_TABLE_COUNT];
 };
 
 struct smu_context;
@@ -1745,6 +1757,82 @@ static inline void smu_table_cache_fini(struct smu_context *smu,
 	}
 }
 
+static inline int smu_driver_table_init(struct smu_context *smu,
+					enum smu_driver_table_id table_id,
+					size_t size, uint32_t cache_interval)
+{
+	struct smu_table_context *smu_table = &smu->smu_table;
+	struct smu_driver_table *driver_tables = smu_table->driver_tables;
+
+	if (table_id >= SMU_DRIVER_TABLE_COUNT)
+		return -EINVAL;
+
+	driver_tables[table_id].id = table_id;
+	driver_tables[table_id].cache.buffer = kzalloc(size, GFP_KERNEL);
+	if (!driver_tables[table_id].cache.buffer)
+		return -ENOMEM;
+
+	driver_tables[table_id].cache.last_cache_time = 0;
+	driver_tables[table_id].cache.interval = cache_interval;
+	driver_tables[table_id].cache.size = size;
+
+	return 0;
+}
+
+static inline void smu_driver_table_fini(struct smu_context *smu,
+					 enum smu_driver_table_id table_id)
+{
+	struct smu_table_context *smu_table = &smu->smu_table;
+	struct smu_driver_table *driver_tables = smu_table->driver_tables;
+
+	if (table_id >= SMU_DRIVER_TABLE_COUNT)
+		return;
+
+	if (driver_tables[table_id].cache.buffer) {
+		kfree(driver_tables[table_id].cache.buffer);
+		driver_tables[table_id].cache.buffer = NULL;
+		driver_tables[table_id].cache.last_cache_time = 0;
+		driver_tables[table_id].cache.interval = 0;
+	}
+}
+
+static inline bool smu_driver_table_is_valid(struct smu_driver_table *table)
+{
+	if (!table->cache.buffer || !table->cache.last_cache_time ||
+	    !table->cache.interval || !table->cache.size ||
+	    time_after(jiffies,
+		       table->cache.last_cache_time +
+		       msecs_to_jiffies(table->cache.interval)))
+		return false;
+
+	return true;
+}
+
+static inline void *smu_driver_table_ptr(struct smu_context *smu,
+					 enum smu_driver_table_id table_id)
+{
+	struct smu_table_context *smu_table = &smu->smu_table;
+	struct smu_driver_table *driver_tables = smu_table->driver_tables;
+
+	if (table_id >= SMU_DRIVER_TABLE_COUNT)
+		return NULL;
+
+	return driver_tables[table_id].cache.buffer;
+}
+
+static inline void
+smu_driver_table_update_cache_time(struct smu_context *smu,
+				   enum smu_driver_table_id table_id)
+{
+	struct smu_table_context *smu_table = &smu->smu_table;
+	struct smu_driver_table *driver_tables = smu_table->driver_tables;
+
+	if (table_id >= SMU_DRIVER_TABLE_COUNT)
+		return;
+
+	driver_tables[table_id].cache.last_cache_time = jiffies;
+}
+
 #if !defined(SWSMU_CODE_LAYER_L2) && !defined(SWSMU_CODE_LAYER_L3) && !defined(SWSMU_CODE_LAYER_L4)
 
 int smu_get_power_limit(void *handle, uint32_t *limit,
-- 
2.49.0
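For reference, a minimal sketch of how an IP-specific backend might consume
these helpers. This is not part of the patch; example_get_gpu_metrics and
example_fill_metrics_from_fw are illustrative names, and a real backend
would populate the buffer from firmware in its own way:

	/*
	 * Illustrative consumer of the cache helpers above: serve the
	 * cached metrics table while it is within its interval, and
	 * refresh it from firmware once the cache has gone stale.
	 */
	static ssize_t example_get_gpu_metrics(struct smu_context *smu,
					       void **table)
	{
		struct smu_driver_table *dtable =
			&smu->smu_table.driver_tables[SMU_DRIVER_TABLE_GPU_METRICS];
		void *buf = smu_driver_table_ptr(smu,
						 SMU_DRIVER_TABLE_GPU_METRICS);
		int ret;

		if (!buf)
			return -ENOMEM;

		if (!smu_driver_table_is_valid(dtable)) {
			/* Hypothetical IP-specific refill from firmware. */
			ret = example_fill_metrics_from_fw(smu, buf,
							   dtable->cache.size);
			if (ret)
				return ret;
			smu_driver_table_update_cache_time(smu,
						SMU_DRIVER_TABLE_GPU_METRICS);
		}

		*table = buf;
		return dtable->cache.size;
	}

with setup/teardown along these lines (the 100 ms interval and the metrics
version are placeholders):

	ret = smu_driver_table_init(smu, SMU_DRIVER_TABLE_GPU_METRICS,
				    sizeof(struct gpu_metrics_v1_3), 100);
	...
	smu_driver_table_fini(smu, SMU_DRIVER_TABLE_GPU_METRICS);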
