add support to query vram info from firmware
v2: change APU vram type, add multi-aid check
v3: separate vram info query function into 3 parts and
call them in a helper func when requirements
are met.
Signed-off-by: Gangliang Xie <[email protected]>
---
.../gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c | 459 ++++++++++--------
.../gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h | 4 +
drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c | 22 +
drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h | 2 +
drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c | 2 +-
drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c | 2 +-
drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c | 2 +-
drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 58 ++-
8 files changed, 308 insertions(+), 243 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
index 7f4751e5caaf..cd9aa5b45e94 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
@@ -373,249 +373,280 @@ int amdgpu_atomfirmware_get_uma_carveout_info(struct
amdgpu_device *adev,
return -ENODEV;
}
-int
-amdgpu_atomfirmware_get_vram_info(struct amdgpu_device *adev,
+int amdgpu_atomfirmware_get_integrated_system_info(struct amdgpu_device *adev,
int *vram_width, int *vram_type,
int *vram_vendor)
{
struct amdgpu_mode_info *mode_info = &adev->mode_info;
- int index, i = 0;
+ int index;
u16 data_offset, size;
union igp_info *igp_info;
- union vram_info *vram_info;
- union umc_info *umc_info;
- union vram_module *vram_module;
u8 frev, crev;
u8 mem_type;
- u8 mem_vendor;
u32 mem_channel_number;
u32 mem_channel_width;
- u32 module_id;
- if (adev->flags & AMD_IS_APU)
- index =
get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
+ index =
get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
integratedsysteminfo);
- else {
- switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
- case IP_VERSION(12, 0, 0):
- case IP_VERSION(12, 0, 1):
- index =
get_index_into_master_table(atom_master_list_of_data_tables_v2_1, umc_info);
+ if (amdgpu_atom_parse_data_header(mode_info->atom_context,
+ index, &size,
+ &frev, &crev, &data_offset)) {
+ igp_info = (union igp_info *)
+ (mode_info->atom_context->bios + data_offset);
+ switch (frev) {
+ case 1:
+ switch (crev) {
+ case 11:
+ case 12:
+ mem_channel_number =
igp_info->v11.umachannelnumber;
+ if (!mem_channel_number)
+ mem_channel_number = 1;
+ mem_type = igp_info->v11.memorytype;
+ if (mem_type == LpDdr5MemType)
+ mem_channel_width = 32;
+ else
+ mem_channel_width = 64;
+ if (vram_width)
+ *vram_width = mem_channel_number *
mem_channel_width;
+ if (vram_type)
+ *vram_type =
convert_atom_mem_type_to_vram_type(adev, mem_type);
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ case 2:
+ switch (crev) {
+ case 1:
+ case 2:
+ mem_channel_number =
igp_info->v21.umachannelnumber;
+ if (!mem_channel_number)
+ mem_channel_number = 1;
+ mem_type = igp_info->v21.memorytype;
+ if (mem_type == LpDdr5MemType)
+ mem_channel_width = 32;
+ else
+ mem_channel_width = 64;
+ if (vram_width)
+ *vram_width = mem_channel_number *
mem_channel_width;
+ if (vram_type)
+ *vram_type =
convert_atom_mem_type_to_vram_type(adev, mem_type);
+ break;
+ case 3:
+ mem_channel_number =
igp_info->v23.umachannelnumber;
+ if (!mem_channel_number)
+ mem_channel_number = 1;
+ mem_type = igp_info->v23.memorytype;
+ if (mem_type == LpDdr5MemType)
+ mem_channel_width = 32;
+ else
+ mem_channel_width = 64;
+ if (vram_width)
+ *vram_width = mem_channel_number *
mem_channel_width;
+ if (vram_type)
+ *vram_type =
convert_atom_mem_type_to_vram_type(adev, mem_type);
+ break;
+ default:
+ return -EINVAL;
+ }
break;
default:
- index =
get_index_into_master_table(atom_master_list_of_data_tables_v2_1, vram_info);
+ return -EINVAL;
}
+ } else {
+ return -EINVAL;
}
+ return 0;
+}
+
+int amdgpu_atomfirmware_get_umc_info(struct amdgpu_device *adev,
+ int *vram_width, int *vram_type,
+ int *vram_vendor)
+{
+ struct amdgpu_mode_info *mode_info = &adev->mode_info;
+ int index;
+ u16 data_offset, size;
+ union umc_info *umc_info;
+ u8 frev, crev;
+ u8 mem_type;
+ u8 mem_vendor;
+ u32 mem_channel_number;
+ u32 mem_channel_width;
+
+ index =
get_index_into_master_table(atom_master_list_of_data_tables_v2_1, umc_info);
+
if (amdgpu_atom_parse_data_header(mode_info->atom_context,
index, &size,
&frev, &crev, &data_offset)) {
- if (adev->flags & AMD_IS_APU) {
- igp_info = (union igp_info *)
- (mode_info->atom_context->bios + data_offset);
- switch (frev) {
- case 1:
- switch (crev) {
- case 11:
- case 12:
- mem_channel_number =
igp_info->v11.umachannelnumber;
- if (!mem_channel_number)
- mem_channel_number = 1;
- mem_type = igp_info->v11.memorytype;
- if (mem_type == LpDdr5MemType)
- mem_channel_width = 32;
- else
- mem_channel_width = 64;
- if (vram_width)
- *vram_width =
mem_channel_number * mem_channel_width;
- if (vram_type)
- *vram_type =
convert_atom_mem_type_to_vram_type(adev, mem_type);
- break;
- default:
- return -EINVAL;
- }
- break;
- case 2:
- switch (crev) {
- case 1:
- case 2:
- mem_channel_number =
igp_info->v21.umachannelnumber;
- if (!mem_channel_number)
- mem_channel_number = 1;
- mem_type = igp_info->v21.memorytype;
- if (mem_type == LpDdr5MemType)
- mem_channel_width = 32;
- else
- mem_channel_width = 64;
- if (vram_width)
- *vram_width =
mem_channel_number * mem_channel_width;
- if (vram_type)
- *vram_type =
convert_atom_mem_type_to_vram_type(adev, mem_type);
- break;
- case 3:
- mem_channel_number =
igp_info->v23.umachannelnumber;
- if (!mem_channel_number)
- mem_channel_number = 1;
- mem_type = igp_info->v23.memorytype;
- if (mem_type == LpDdr5MemType)
- mem_channel_width = 32;
- else
- mem_channel_width = 64;
- if (vram_width)
- *vram_width =
mem_channel_number * mem_channel_width;
- if (vram_type)
- *vram_type =
convert_atom_mem_type_to_vram_type(adev, mem_type);
- break;
- default:
- return -EINVAL;
- }
+ umc_info = (union umc_info *)(mode_info->atom_context->bios +
data_offset);
+
+ if (frev == 4) {
+ switch (crev) {
+ case 0:
+ mem_channel_number =
le32_to_cpu(umc_info->v40.channel_num);
+ mem_type = le32_to_cpu(umc_info->v40.vram_type);
+ mem_channel_width =
le32_to_cpu(umc_info->v40.channel_width);
+ mem_vendor =
RREG32(adev->bios_scratch_reg_offset + 4) & 0xF;
+ if (vram_vendor)
+ *vram_vendor = mem_vendor;
+ if (vram_type)
+ *vram_type =
convert_atom_mem_type_to_vram_type(adev, mem_type);
+ if (vram_width)
+ *vram_width = mem_channel_number * (1
<< mem_channel_width);
break;
default:
return -EINVAL;
}
} else {
- switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
- case IP_VERSION(12, 0, 0):
- case IP_VERSION(12, 0, 1):
- umc_info = (union umc_info
*)(mode_info->atom_context->bios + data_offset);
-
- if (frev == 4) {
- switch (crev) {
- case 0:
- mem_channel_number =
le32_to_cpu(umc_info->v40.channel_num);
- mem_type =
le32_to_cpu(umc_info->v40.vram_type);
- mem_channel_width =
le32_to_cpu(umc_info->v40.channel_width);
- mem_vendor =
RREG32(adev->bios_scratch_reg_offset + 4) & 0xF;
- if (vram_vendor)
- *vram_vendor =
mem_vendor;
- if (vram_type)
- *vram_type =
convert_atom_mem_type_to_vram_type(adev, mem_type);
- if (vram_width)
- *vram_width =
mem_channel_number * (1 << mem_channel_width);
- break;
- default:
- return -EINVAL;
- }
- } else
- return -EINVAL;
+ return -EINVAL;
+ }
+ } else {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int amdgpu_atomfirmware_get_vram_info(struct amdgpu_device *adev,
+ int *vram_width, int *vram_type,
+ int *vram_vendor)
+{
+ struct amdgpu_mode_info *mode_info = &adev->mode_info;
+ int index, i = 0;
+ u16 data_offset, size;
+ union vram_info *vram_info;
+ union vram_module *vram_module;
+ u8 frev, crev;
+ u8 mem_type;
+ u8 mem_vendor;
+ u32 mem_channel_number;
+ u32 mem_channel_width;
+ u32 module_id;
+
+ index =
get_index_into_master_table(atom_master_list_of_data_tables_v2_1, vram_info);
+
+ if (amdgpu_atom_parse_data_header(mode_info->atom_context,
+ index, &size,
+ &frev, &crev, &data_offset)) {
+ vram_info = (union vram_info *)
+ (mode_info->atom_context->bios + data_offset);
+
+ module_id = (RREG32(adev->bios_scratch_reg_offset + 4) &
0x00ff0000) >> 16;
+ if (frev == 3) {
+ switch (crev) {
+ /* v30 */
+ case 0:
+ vram_module = (union vram_module
*)vram_info->v30.vram_module;
+ mem_vendor = (vram_module->v30.dram_vendor_id)
& 0xF;
+ if (vram_vendor)
+ *vram_vendor = mem_vendor;
+ mem_type = vram_info->v30.memory_type;
+ if (vram_type)
+ *vram_type =
convert_atom_mem_type_to_vram_type(adev, mem_type);
+ mem_channel_number = vram_info->v30.channel_num;
+ mem_channel_width =
vram_info->v30.channel_width;
+ if (vram_width)
+ *vram_width = mem_channel_number * 16;
break;
default:
- vram_info = (union vram_info *)
- (mode_info->atom_context->bios +
data_offset);
-
- module_id =
(RREG32(adev->bios_scratch_reg_offset + 4) & 0x00ff0000) >> 16;
- if (frev == 3) {
- switch (crev) {
- /* v30 */
- case 0:
- vram_module = (union
vram_module *)vram_info->v30.vram_module;
- mem_vendor =
(vram_module->v30.dram_vendor_id) & 0xF;
- if (vram_vendor)
- *vram_vendor =
mem_vendor;
- mem_type =
vram_info->v30.memory_type;
- if (vram_type)
- *vram_type =
convert_atom_mem_type_to_vram_type(adev, mem_type);
- mem_channel_number =
vram_info->v30.channel_num;
- mem_channel_width =
vram_info->v30.channel_width;
- if (vram_width)
- *vram_width =
mem_channel_number * 16;
- break;
- default:
- return -EINVAL;
- }
- } else if (frev == 2) {
- switch (crev) {
- /* v23 */
- case 3:
- if (module_id >
vram_info->v23.vram_module_num)
- module_id = 0;
- vram_module = (union
vram_module *)vram_info->v23.vram_module;
- while (i < module_id) {
- vram_module = (union
vram_module *)
- ((u8
*)vram_module + vram_module->v9.vram_module_size);
- i++;
- }
- mem_type =
vram_module->v9.memory_type;
- if (vram_type)
- *vram_type =
convert_atom_mem_type_to_vram_type(adev, mem_type);
- mem_channel_number =
vram_module->v9.channel_num;
- mem_channel_width =
vram_module->v9.channel_width;
- if (vram_width)
- *vram_width =
mem_channel_number * (1 << mem_channel_width);
- mem_vendor =
(vram_module->v9.vender_rev_id) & 0xF;
- if (vram_vendor)
- *vram_vendor =
mem_vendor;
- break;
- /* v24 */
- case 4:
- if (module_id >
vram_info->v24.vram_module_num)
- module_id = 0;
- vram_module = (union
vram_module *)vram_info->v24.vram_module;
- while (i < module_id) {
- vram_module = (union
vram_module *)
- ((u8
*)vram_module + vram_module->v10.vram_module_size);
- i++;
- }
- mem_type =
vram_module->v10.memory_type;
- if (vram_type)
- *vram_type =
convert_atom_mem_type_to_vram_type(adev, mem_type);
- mem_channel_number =
vram_module->v10.channel_num;
- mem_channel_width =
vram_module->v10.channel_width;
- if (vram_width)
- *vram_width =
mem_channel_number * (1 << mem_channel_width);
- mem_vendor =
(vram_module->v10.vender_rev_id) & 0xF;
- if (vram_vendor)
- *vram_vendor =
mem_vendor;
- break;
- /* v25 */
- case 5:
- if (module_id >
vram_info->v25.vram_module_num)
- module_id = 0;
- vram_module = (union
vram_module *)vram_info->v25.vram_module;
- while (i < module_id) {
- vram_module = (union
vram_module *)
- ((u8
*)vram_module + vram_module->v11.vram_module_size);
- i++;
- }
- mem_type =
vram_module->v11.memory_type;
- if (vram_type)
- *vram_type =
convert_atom_mem_type_to_vram_type(adev, mem_type);
- mem_channel_number =
vram_module->v11.channel_num;
- mem_channel_width =
vram_module->v11.channel_width;
- if (vram_width)
- *vram_width =
mem_channel_number * (1 << mem_channel_width);
- mem_vendor =
(vram_module->v11.vender_rev_id) & 0xF;
- if (vram_vendor)
- *vram_vendor =
mem_vendor;
- break;
- /* v26 */
- case 6:
- if (module_id >
vram_info->v26.vram_module_num)
- module_id = 0;
- vram_module = (union
vram_module *)vram_info->v26.vram_module;
- while (i < module_id) {
- vram_module = (union
vram_module *)
- ((u8
*)vram_module + vram_module->v9.vram_module_size);
- i++;
- }
- mem_type =
vram_module->v9.memory_type;
- if (vram_type)
- *vram_type =
convert_atom_mem_type_to_vram_type(adev, mem_type);
- mem_channel_number =
vram_module->v9.channel_num;
- mem_channel_width =
vram_module->v9.channel_width;
- if (vram_width)
- *vram_width =
mem_channel_number * (1 << mem_channel_width);
- mem_vendor =
(vram_module->v9.vender_rev_id) & 0xF;
- if (vram_vendor)
- *vram_vendor =
mem_vendor;
- break;
- default:
- return -EINVAL;
- }
- } else {
- /* invalid frev */
- return -EINVAL;
+ return -EINVAL;
+ }
+ } else if (frev == 2) {
+ switch (crev) {
+ /* v23 */
+ case 3:
+ if (module_id > vram_info->v23.vram_module_num)
+ module_id = 0;
+ vram_module = (union vram_module
*)vram_info->v23.vram_module;
+ while (i < module_id) {
+ vram_module = (union vram_module *)
+ ((u8 *)vram_module +
vram_module->v9.vram_module_size);
+ i++;
}
+ mem_type = vram_module->v9.memory_type;
+ if (vram_type)
+ *vram_type =
convert_atom_mem_type_to_vram_type(adev, mem_type);
+ mem_channel_number =
vram_module->v9.channel_num;
+ mem_channel_width =
vram_module->v9.channel_width;
+ if (vram_width)
+ *vram_width = mem_channel_number * (1
<< mem_channel_width);
+ mem_vendor = (vram_module->v9.vender_rev_id) &
0xF;
+ if (vram_vendor)
+ *vram_vendor = mem_vendor;
+ break;
+ /* v24 */
+ case 4:
+ if (module_id > vram_info->v24.vram_module_num)
+ module_id = 0;
+ vram_module = (union vram_module
*)vram_info->v24.vram_module;
+ while (i < module_id) {
+ vram_module = (union vram_module *)
+ ((u8 *)vram_module +
vram_module->v10.vram_module_size);
+ i++;
+ }
+ mem_type = vram_module->v10.memory_type;
+ if (vram_type)
+ *vram_type =
convert_atom_mem_type_to_vram_type(adev, mem_type);
+ mem_channel_number =
vram_module->v10.channel_num;
+ mem_channel_width =
vram_module->v10.channel_width;
+ if (vram_width)
+ *vram_width = mem_channel_number * (1
<< mem_channel_width);
+ mem_vendor = (vram_module->v10.vender_rev_id) &
0xF;
+ if (vram_vendor)
+ *vram_vendor = mem_vendor;
+ break;
+ /* v25 */
+ case 5:
+ if (module_id > vram_info->v25.vram_module_num)
+ module_id = 0;
+ vram_module = (union vram_module
*)vram_info->v25.vram_module;
+ while (i < module_id) {
+ vram_module = (union vram_module *)
+ ((u8 *)vram_module +
vram_module->v11.vram_module_size);
+ i++;
+ }
+ mem_type = vram_module->v11.memory_type;
+ if (vram_type)
+ *vram_type =
convert_atom_mem_type_to_vram_type(adev, mem_type);
+ mem_channel_number =
vram_module->v11.channel_num;
+ mem_channel_width =
vram_module->v11.channel_width;
+ if (vram_width)
+ *vram_width = mem_channel_number * (1
<< mem_channel_width);
+ mem_vendor = (vram_module->v11.vender_rev_id) &
0xF;
+ if (vram_vendor)
+ *vram_vendor = mem_vendor;
+ break;
+ /* v26 */
+ case 6:
+ if (module_id > vram_info->v26.vram_module_num)
+ module_id = 0;
+ vram_module = (union vram_module
*)vram_info->v26.vram_module;
+ while (i < module_id) {
+ vram_module = (union vram_module *)
+ ((u8 *)vram_module +
vram_module->v9.vram_module_size);
+ i++;
+ }
+ mem_type = vram_module->v9.memory_type;
+ if (vram_type)
+ *vram_type =
convert_atom_mem_type_to_vram_type(adev, mem_type);
+ mem_channel_number =
vram_module->v9.channel_num;
+ mem_channel_width =
vram_module->v9.channel_width;
+ if (vram_width)
+ *vram_width = mem_channel_number * (1
<< mem_channel_width);
+ mem_vendor = (vram_module->v9.vender_rev_id) &
0xF;
+ if (vram_vendor)
+ *vram_vendor = mem_vendor;
+ break;
+ default:
+ return -EINVAL;
}
+ } else {
+ /* invalid frev */
+ return -EINVAL;
}
+
+ } else {
+ return -EINVAL;
}
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h
b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h
index 67c8d105729b..0760e4510513 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h
@@ -30,6 +30,10 @@ uint32_t
amdgpu_atomfirmware_query_firmware_capability(struct amdgpu_device *ade
bool amdgpu_atomfirmware_gpu_virtualization_supported(struct amdgpu_device
*adev);
void amdgpu_atomfirmware_scratch_regs_init(struct amdgpu_device *adev);
int amdgpu_atomfirmware_allocate_fb_scratch(struct amdgpu_device *adev);
+int amdgpu_atomfirmware_get_integrated_system_info(struct amdgpu_device *adev,
+ int *vram_width, int *vram_type, int *vram_vendor);
+int amdgpu_atomfirmware_get_umc_info(struct amdgpu_device *adev,
+ int *vram_width, int *vram_type, int *vram_vendor);
int amdgpu_atomfirmware_get_vram_info(struct amdgpu_device *adev,
int *vram_width, int *vram_type, int *vram_vendor);
int amdgpu_atomfirmware_get_uma_carveout_info(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
index f165d4e401e8..ecb42b304ccc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
@@ -34,6 +34,7 @@
#include "amdgpu_ras.h"
#include "amdgpu_reset.h"
#include "amdgpu_xgmi.h"
+#include "amdgpu_atomfirmware.h"
#include <drm/drm_drv.h>
#include <drm/ttm/ttm_tt.h>
@@ -1748,3 +1749,24 @@ int amdgpu_gmc_init_mem_ranges(struct amdgpu_device
*adev)
return 0;
}
+
+int amdgpu_gmc_get_vram_info(struct amdgpu_device *adev,
+ int *vram_width, int *vram_type, int *vram_vendor)
+{
+ if (adev->flags & AMD_IS_APU)
+ return amdgpu_atomfirmware_get_integrated_system_info(adev,
+ vram_width, vram_type,
vram_vendor);
+ switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
+ case IP_VERSION(12, 0, 0):
+ case IP_VERSION(12, 0, 1):
+ case IP_VERSION(9, 5, 0):
+ case IP_VERSION(9, 4, 4):
+ case IP_VERSION(9, 4, 3):
+ return amdgpu_atomfirmware_get_umc_info(adev,
+ vram_width,
vram_type, vram_vendor);
+ default:
+ return amdgpu_atomfirmware_get_vram_info(adev,
+ vram_width,
vram_type, vram_vendor);
+ }
+ return 0;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
index b9fdc3276e81..32e73e8ba778 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
@@ -482,4 +482,6 @@ amdgpu_gmc_query_memory_partition(struct amdgpu_device
*adev);
int amdgpu_gmc_init_mem_ranges(struct amdgpu_device *adev);
void amdgpu_gmc_init_sw_mem_ranges(struct amdgpu_device *adev,
struct amdgpu_mem_partition_info
*mem_ranges);
+int amdgpu_gmc_get_vram_info(struct amdgpu_device *adev,
+ int *vram_width, int *vram_type, int *vram_vendor);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
index 2568eeaae945..fd691b2a6e21 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
@@ -767,7 +767,7 @@ static int gmc_v10_0_sw_init(struct amdgpu_ip_block
*ip_block)
adev->gmc.vram_type = AMDGPU_VRAM_TYPE_GDDR6;
adev->gmc.vram_width = 1 * 128; /* numchan * chansize */
} else {
- r = amdgpu_atomfirmware_get_vram_info(adev,
+ r = amdgpu_gmc_get_vram_info(adev,
&vram_width, &vram_type, &vram_vendor);
adev->gmc.vram_width = vram_width;
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
index 6349e239a367..e6db87b94eb1 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
@@ -751,7 +751,7 @@ static int gmc_v11_0_sw_init(struct amdgpu_ip_block
*ip_block)
spin_lock_init(&adev->gmc.invalidate_lock);
- r = amdgpu_atomfirmware_get_vram_info(adev,
+ r = amdgpu_gmc_get_vram_info(adev,
&vram_width, &vram_type,
&vram_vendor);
adev->gmc.vram_width = vram_width;
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c
b/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c
index f1079bd8cf00..6e184ea069ef 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c
@@ -825,7 +825,7 @@ static int gmc_v12_0_sw_init(struct amdgpu_ip_block
*ip_block)
if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(12, 1, 0)) {
gmc_v12_1_init_vram_info(adev);
} else {
- r = amdgpu_atomfirmware_get_vram_info(adev,
+ r = amdgpu_gmc_get_vram_info(adev,
&vram_width, &vram_type, &vram_vendor);
adev->gmc.vram_width = vram_width;
adev->gmc.vram_type = vram_type;
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index 1ca0202cfdea..d865059e884a 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -1823,24 +1823,37 @@ static void gmc_v9_0_save_registers(struct
amdgpu_device *adev)
adev->gmc.sdpif_register = RREG32_SOC15(DCE, 0,
mmDCHUBBUB_SDPIF_MMIO_CNTRL_0);
}
-static void gmc_v9_4_3_init_vram_info(struct amdgpu_device *adev)
+static void gmc_v9_0_init_vram_info(struct amdgpu_device *adev)
{
static const u32 regBIF_BIOS_SCRATCH_4 = 0x50;
+ int dev_var = adev->pdev->device & 0xF;
u32 vram_info;
- adev->gmc.vram_type = AMDGPU_VRAM_TYPE_HBM;
- adev->gmc.vram_width = 128 * 64;
-
- if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 5, 0))
- adev->gmc.vram_type = AMDGPU_VRAM_TYPE_HBM3E;
-
- if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4) &&
- adev->rev_id == 0x3)
- adev->gmc.vram_type = AMDGPU_VRAM_TYPE_HBM3E;
-
- if (!(adev->flags & AMD_IS_APU) && !amdgpu_sriov_vf(adev)) {
- vram_info = RREG32(regBIF_BIOS_SCRATCH_4);
- adev->gmc.vram_vendor = vram_info & 0xF;
+ if (adev->gmc.is_app_apu) {
+ adev->gmc.vram_type = AMDGPU_VRAM_TYPE_HBM;
+ adev->gmc.vram_width = 128 * 64;
+ } else if (adev->flags & AMD_IS_APU) {
+ adev->gmc.vram_type = AMDGPU_VRAM_TYPE_DDR4;
+ adev->gmc.vram_width = 64 * 64;
+ } else if (amdgpu_is_multi_aid(adev)) {
+ adev->gmc.vram_type = AMDGPU_VRAM_TYPE_HBM;
+ adev->gmc.vram_width = 128 * 64;
+
+ if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 5, 0))
+ adev->gmc.vram_type = AMDGPU_VRAM_TYPE_HBM3E;
+
+ if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4)
&&
+ adev->rev_id == 0x3)
+ adev->gmc.vram_type = AMDGPU_VRAM_TYPE_HBM3E;
+
+ if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3)
&&
+ (dev_var == 0x5))
+ adev->gmc.vram_type = AMDGPU_VRAM_TYPE_HBM3E;
+
+ if (!(adev->flags & AMD_IS_APU) && !amdgpu_sriov_vf(adev)) {
+ vram_info = RREG32(regBIF_BIOS_SCRATCH_4);
+ adev->gmc.vram_vendor = vram_info & 0xF;
+ }
}
}
@@ -1856,19 +1869,11 @@ static int gmc_v9_0_sw_init(struct amdgpu_ip_block
*ip_block)
spin_lock_init(&adev->gmc.invalidate_lock);
- if (amdgpu_is_multi_aid(adev)) {
- gmc_v9_4_3_init_vram_info(adev);
- } else if (!adev->bios) {
- if (adev->flags & AMD_IS_APU) {
- adev->gmc.vram_type = AMDGPU_VRAM_TYPE_DDR4;
- adev->gmc.vram_width = 64 * 64;
- } else {
- adev->gmc.vram_type = AMDGPU_VRAM_TYPE_HBM;
- adev->gmc.vram_width = 128 * 64;
- }
+ if (!adev->bios) {
+ gmc_v9_0_init_vram_info(adev);
} else {
- r = amdgpu_atomfirmware_get_vram_info(adev,
- &vram_width, &vram_type, &vram_vendor);
+ r = amdgpu_gmc_get_vram_info(adev,
+ &vram_width, &vram_type, &vram_vendor);
if (amdgpu_sriov_vf(adev))
/* For Vega10 SR-IOV, vram_width can't be read from
ATOM as RAVEN,
* and DF related registers is not readable, seems
hardcord is the
@@ -1896,6 +1901,7 @@ static int gmc_v9_0_sw_init(struct amdgpu_ip_block
*ip_block)
adev->gmc.vram_type = vram_type;
adev->gmc.vram_vendor = vram_vendor;
}
+
switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
case IP_VERSION(9, 1, 0):
case IP_VERSION(9, 2, 2):
--
2.34.1