Am 13.12.2017 um 08:19 schrieb Chunming Zhou:
Change-Id: Ic1f39d3bc853e9e4259d3e03a22920eda822eec5
Signed-off-by: Chunming Zhou <david1.z...@amd.com>

You dropped reversing the ordering and instead note the root level separately? Nifty idea.

Just please drop AMDGPU_VM_SUBPTB; translate further is something we will hopefully use for the first and last time on Raven.

So I would like to keep that completely transparent to the VM code and do the patching in the GMC specific implementation for Raven.
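Something like the following rough sketch is what I have in mind. Only the Raven GMC callback would know about the extra translation step; the AMDGPU_PTE_TRANSLATE_FURTHER bit and the exact condition are assumptions on my side, not something this patch or the current code defines:

    /* Sketch only: the generic VM code keeps AMDGPU_VM_PTB as its last
     * level, and the Raven GMC callback rewrites the last PDE so the
     * walker translates one level further.
     */
    static void gmc_v9_0_get_vm_pde(struct amdgpu_device *adev, int level,
                                    u64 *addr, u64 *flags)
    {
            /* existing address/system-bit patching stays unchanged */

            if (adev->asic_type == CHIP_RAVEN &&
                level == adev->vm_manager.last_level - 1)
                    /* hypothetical bit asking for one more walk step */
                    *flags |= AMDGPU_PTE_TRANSLATE_FURTHER;
    }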

Christian.

---
  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 69 +++++++++++++++++++++++++---------
  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 14 +++++++
  2 files changed, 66 insertions(+), 17 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 709587d8a77f..fc858ddf9319 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -148,12 +148,29 @@ struct amdgpu_prt_cb {
  static unsigned amdgpu_vm_level_shift(struct amdgpu_device *adev,
                                      unsigned level)
  {
-       if (level != adev->vm_manager.num_level)
-               return 9 * (adev->vm_manager.num_level - level - 1) +
+       unsigned shift = 0xff;
+
+       switch (level) {
+       case AMDGPU_VM_PDB2:
+       case AMDGPU_VM_PDB1:
+       case AMDGPU_VM_PDB0:
+               shift = 9 * (adev->vm_manager.last_level - level - 1) +
                        adev->vm_manager.block_size;
-       else
-               /* For the page tables on the leaves */
-               return 0;
+               break;
+       case AMDGPU_VM_PTB:
+               if (adev->vm_manager.last_level == AMDGPU_VM_PTB)
+                       shift = 0;
+               else
+                       shift = adev->vm_manager.block_size;
+               break;
+       case AMDGPU_VM_SUBPTB:
+               shift = 0;
+               break;
+       default:
+               dev_err(adev->dev, "the level %d isn't supported.\n", level);
+       }
+
+       return shift;
  }
/**
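Just to double-check the math here: with the usual Vega numbers (last_level = AMDGPU_VM_PTB = 3, block_size = 9) this gives the same shifts as the old formula, 27/18/9 for PDB2/PDB1/PDB0 and 0 for the PTB. A quick standalone check (plain userspace C, values assumed):

    #include <stdio.h>

    /* Quick model of the new shift computation for the common case
     * last_level == AMDGPU_VM_PTB; block_size = 9 is an assumption.
     */
    int main(void)
    {
            const unsigned last_level = 3;  /* AMDGPU_VM_PTB */
            const unsigned block_size = 9;  /* assumed */

            for (unsigned level = 0; level <= last_level; level++) {
                    unsigned shift = level < last_level ?
                            9 * (last_level - level - 1) + block_size : 0;
                    printf("level %u -> shift %u\n", level, shift);
            }
            return 0;       /* prints 27, 18, 9 and 0 */
    }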
@@ -166,12 +183,13 @@ static unsigned amdgpu_vm_level_shift(struct amdgpu_device *adev,
  static unsigned amdgpu_vm_num_entries(struct amdgpu_device *adev,
                                      unsigned level)
  {
-       unsigned shift = amdgpu_vm_level_shift(adev, 0);
+       unsigned shift = amdgpu_vm_level_shift(adev,
+                                              adev->vm_manager.root_level);
-       if (level == 0)
+       if (level == adev->vm_manager.root_level)
                /* For the root directory */
                return round_up(adev->vm_manager.max_pfn, 1 << shift) >> shift;
-       else if (level != adev->vm_manager.num_level)
+       else if (level != adev->vm_manager.last_level)
                /* Everything in between */
                return 512;
        else
@@ -385,7 +403,7 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev,
                        spin_unlock(&vm->status_lock);
                }
-               if (level < adev->vm_manager.num_level) {
+               if (level < adev->vm_manager.last_level) {
                        uint64_t sub_saddr = (pt_idx == from) ? saddr : 0;
                        uint64_t sub_eaddr = (pt_idx == to) ? eaddr :
                                ((1 << shift) - 1);
@@ -431,7 +449,8 @@ int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
        saddr /= AMDGPU_GPU_PAGE_SIZE;
        eaddr /= AMDGPU_GPU_PAGE_SIZE;
-       return amdgpu_vm_alloc_levels(adev, vm, &vm->root, saddr, eaddr, 0);
+       return amdgpu_vm_alloc_levels(adev, vm, &vm->root, saddr, eaddr,
+                                     adev->vm_manager.root_level);
  }
/**
@@ -1247,7 +1266,8 @@ int amdgpu_vm_update_directories(struct amdgpu_device *adev,
        return 0;
error:
-       amdgpu_vm_invalidate_level(adev, vm, &vm->root, 0);
+       amdgpu_vm_invalidate_level(adev, vm, &vm->root,
+                                  adev->vm_manager.root_level);
        amdgpu_job_free(job);
        return r;
  }
@@ -1266,7 +1286,7 @@ void amdgpu_vm_get_entry(struct amdgpu_pte_update_params *p, uint64_t addr,
                         struct amdgpu_vm_pt **entry,
                         struct amdgpu_vm_pt **parent)
  {
-       unsigned level = 0;
+       unsigned level = p->adev->vm_manager.root_level;
        *parent = NULL;
        *entry = &p->vm->root;
@@ -1278,7 +1298,7 @@ void amdgpu_vm_get_entry(struct amdgpu_pte_update_params *p, uint64_t addr,
                addr &= (1ULL << shift) - 1;
        }
-       if (level != p->adev->vm_manager.num_level)
+       if (level != p->adev->vm_manager.last_level)
                *entry = NULL;
  }
@@ -1320,7 +1340,7 @@ static void amdgpu_vm_handle_huge_pages(struct amdgpu_pte_update_params *p,
                return;
        entry->huge = !!(flags & AMDGPU_PDE_PTE);
-       amdgpu_gart_get_vm_pde(p->adev, p->adev->vm_manager.num_level - 1,
+       amdgpu_gart_get_vm_pde(p->adev, p->adev->vm_manager.last_level - 1,
                               &dst, &flags);
        if (use_cpu_update) {
@@ -1636,7 +1656,8 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
error_free:
        amdgpu_job_free(job);
-       amdgpu_vm_invalidate_level(adev, vm, &vm->root, 0);
+       amdgpu_vm_invalidate_level(adev, vm, &vm->root,
+                                  adev->vm_manager.root_level);
        return r;
  }
@@ -2552,7 +2573,20 @@ void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t vm_size,
                tmp >>= amdgpu_vm_block_size - 9;
        tmp = DIV_ROUND_UP(fls64(tmp) - 1, 9) - 1;
        adev->vm_manager.num_level = min(max_level, (unsigned)tmp);
-
+       switch (adev->vm_manager.num_level) {
+       case 3:
+               adev->vm_manager.root_level = AMDGPU_VM_PDB2;
+               break;
+       case 2:
+               adev->vm_manager.root_level = AMDGPU_VM_PDB1;
+               break;
+       case 1:
+               adev->vm_manager.root_level = AMDGPU_VM_PDB0;
+               break;
+       default:
+               dev_err(adev->dev, "VMPT only supports 2~4+1 levels\n");
+       }
+       adev->vm_manager.last_level = AMDGPU_VM_PTB;
        /* block size depends on vm size and hw setup*/
        if (amdgpu_vm_block_size != -1)
                adev->vm_manager.block_size =
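(If I read the switch above right: num_level = 3, which I would expect for the full 48-bit VA setup, makes the root PDB2, so the walk is PDB2->PDB1->PDB0->PTB; num_level = 1 starts directly at PDB0 for a two-level setup.)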
@@ -2782,7 +2816,8 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
        if (r) {
                dev_err(adev->dev, "Leaking page tables because BO reservation 
failed\n");
        } else {
-               amdgpu_vm_free_levels(adev, &vm->root, 0);
+               amdgpu_vm_free_levels(adev, &vm->root,
+                                     adev->vm_manager.root_level);
                amdgpu_bo_unreserve(root);
        }
        amdgpu_bo_unref(&root);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index e52bf980669f..9d5b2ce5e527 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -120,6 +120,18 @@ struct amdgpu_bo_list_entry;
  #define AMDGPU_VM_USE_CPU_FOR_GFX (1 << 0)
  #define AMDGPU_VM_USE_CPU_FOR_COMPUTE (1 << 1)
+/* VMPT level enumeration; the hierarchy is:
+ * PDB2->PDB1->PDB0->PTB->SUBPTB
+ */
+enum amdgpu_vm_level {
+       AMDGPU_VM_PDB2,
+       AMDGPU_VM_PDB1,
+       AMDGPU_VM_PDB0,
+       AMDGPU_VM_PTB,
+       AMDGPU_VM_SUBPTB,
+       AMDGPU_VM_LEVELS
+};
+
  /* base structure for tracking BO usage in a VM */
  struct amdgpu_vm_bo_base {
        /* constant after initialization */
@@ -236,6 +248,8 @@ struct amdgpu_vm_manager {
        uint32_t                                num_level;
        uint32_t                                block_size;
        uint32_t                                fragment_size;
+       uint32_t                                root_level;
+       uint32_t                                last_level;
        /* vram base address for page table entry  */
        u64                                     vram_base_offset;
        /* vm pte handling */
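A nice property of the enum ordering, by the way: because the levels are numbered top-down (PDB2 < PDB1 < PDB0 < PTB), the existing integer comparisons like "level < adev->vm_manager.last_level" in amdgpu_vm_alloc_levels() and "level != p->adev->vm_manager.last_level" in amdgpu_vm_get_entry() keep working without any special casing.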

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx
