From: Christian König <christian.koe...@amd.com>

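Use normal buffer objects for the VM page tables instead of memory from
the suballocator. The page table array is allocated in radeon_vm_init(),
the individual page table BOs are created on demand in
radeon_vm_bo_set_addr(), cleared with the new radeon_vm_clear_bo() helper
and added to the CS reservation list in radeon_vm_add_bos(). This makes
the SA manager backed allocation, the VM LRU list and the eviction logic
obsolete, so drop them.
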
Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/gpu/drm/radeon/radeon.h        |  12 +-
 drivers/gpu/drm/radeon/radeon_cs.c     |   5 -
 drivers/gpu/drm/radeon/radeon_device.c |   1 -
 drivers/gpu/drm/radeon/radeon_vm.c     | 389 ++++++++++++---------------------
 4 files changed, 141 insertions(+), 266 deletions(-)

diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index f4fb984..375dbf5 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -854,8 +854,12 @@ struct radeon_mec {
 #define R600_PTE_READABLE      (1 << 5)
 #define R600_PTE_WRITEABLE     (1 << 6)

+struct radeon_vm_pt {
+       struct radeon_bo                *bo;
+       uint64_t                        addr;
+};
+
 struct radeon_vm {
-       struct list_head                list;
        struct list_head                va;
        unsigned                        id;

@@ -864,7 +868,7 @@ struct radeon_vm {
        uint64_t                        pd_gpu_addr;

        /* array of page tables, one for each page directory entry */
-       struct radeon_sa_bo             **page_tables;
+       struct radeon_vm_pt             *page_tables;

        struct mutex                    mutex;
        /* last fence for cs using this vm */
@@ -877,9 +881,7 @@ struct radeon_vm {

 struct radeon_vm_manager {
        struct mutex                    lock;
-       struct list_head                lru_vm;
        struct radeon_fence             *active[RADEON_NUM_VM];
-       struct radeon_sa_manager        sa_manager;
        uint32_t                        max_pfn;
        /* number of VMIDs */
        unsigned                        nvm;
@@ -2793,11 +2795,9 @@ int radeon_vm_manager_init(struct radeon_device *rdev);
 void radeon_vm_manager_fini(struct radeon_device *rdev);
 int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm);
 void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm);
-int radeon_vm_alloc_pt(struct radeon_device *rdev, struct radeon_vm *vm);
 struct radeon_bo_list *radeon_vm_add_bos(struct radeon_device *rdev,
                                         struct radeon_vm *vm,
                                          struct list_head *head);
-void radeon_vm_add_to_lru(struct radeon_device *rdev, struct radeon_vm *vm);
 struct radeon_fence *radeon_vm_grab_id(struct radeon_device *rdev,
                                       struct radeon_vm *vm, int ring);
 void radeon_vm_flush(struct radeon_device *rdev,
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 335cc84..849f997 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -420,10 +420,6 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,

        mutex_lock(&rdev->vm_manager.lock);
        mutex_lock(&vm->mutex);
-       r = radeon_vm_alloc_pt(rdev, vm);
-       if (r) {
-               goto out;
-       }
        r = radeon_bo_vm_update_pte(parser, vm);
        if (r) {
                goto out;
@@ -441,7 +437,6 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
        }

 out:
-       radeon_vm_add_to_lru(rdev, vm);
        mutex_unlock(&vm->mutex);
        mutex_unlock(&rdev->vm_manager.lock);
        return r;
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index fa7841b..e58dbab 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -1198,7 +1198,6 @@ int radeon_device_init(struct radeon_device *rdev,
         * Max GPUVM size for cayman and SI is 40 bits.
         */
        rdev->vm_manager.max_pfn = 1 << 20;
-       INIT_LIST_HEAD(&rdev->vm_manager.lru_vm);

        /* Set asic functions */
        r = radeon_asic_init(rdev);
diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c
index b50e50f..5441a6a 100644
--- a/drivers/gpu/drm/radeon/radeon_vm.c
+++ b/drivers/gpu/drm/radeon/radeon_vm.c
@@ -84,83 +84,19 @@ static unsigned radeon_vm_directory_size(struct radeon_device *rdev)
  */
 int radeon_vm_manager_init(struct radeon_device *rdev)
 {
-       struct radeon_vm *vm;
-       struct radeon_bo_va *bo_va;
        int r;
-       unsigned size;

        if (!rdev->vm_manager.enabled) {
-               /* allocate enough for 2 full VM pts */
-               size = radeon_vm_directory_size(rdev);
-               size += rdev->vm_manager.max_pfn * 8;
-               size *= 2;
-               r = radeon_sa_bo_manager_init(rdev, &rdev->vm_manager.sa_manager,
-                                             RADEON_GPU_PAGE_ALIGN(size),
-                                             RADEON_VM_PTB_ALIGN_SIZE,
-                                             RADEON_GEM_DOMAIN_VRAM);
-               if (r) {
-                       dev_err(rdev->dev, "failed to allocate vm bo (%dKB)\n",
-                               (rdev->vm_manager.max_pfn * 8) >> 10);
-                       return r;
-               }
-
                r = radeon_asic_vm_init(rdev);
                if (r)
                        return r;

                rdev->vm_manager.enabled = true;
-
-               r = radeon_sa_bo_manager_start(rdev, &rdev->vm_manager.sa_manager);
-               if (r)
-                       return r;
-       }
-
-       /* restore page table */
-       list_for_each_entry(vm, &rdev->vm_manager.lru_vm, list) {
-               if (vm->page_tables == NULL)
-                       continue;
-
-               list_for_each_entry(bo_va, &vm->va, vm_list) {
-                       bo_va->valid = false;
-               }
        }
        return 0;
 }

 /**
- * radeon_vm_free_pt - free the page table for a specific vm
- *
- * @rdev: radeon_device pointer
- * @vm: vm to unbind
- *
- * Free the page table of a specific vm (cayman+).
- *
- * Global and local mutex must be lock!
- */
-static void radeon_vm_free_pt(struct radeon_device *rdev,
-                                   struct radeon_vm *vm)
-{
-       struct radeon_bo_va *bo_va;
-       int i;
-
-       if (!vm->page_tables)
-               return;
-
-       list_del_init(&vm->list);
-       list_for_each_entry(bo_va, &vm->va, vm_list) {
-               bo_va->valid = false;
-       }
-
-       if (vm->page_tables == NULL)
-               return;
-
-       for (i = 0; i < radeon_vm_num_pdes(rdev); i++)
-               radeon_sa_bo_free(rdev, &vm->page_tables[i], vm->fence);
-
-       kfree(vm->page_tables);
-}
-
-/**
  * radeon_vm_manager_fini - tear down the vm manager
  *
  * @rdev: radeon_device pointer
@@ -169,105 +105,17 @@ static void radeon_vm_free_pt(struct radeon_device *rdev,
  */
 void radeon_vm_manager_fini(struct radeon_device *rdev)
 {
-       struct radeon_vm *vm, *tmp;
        int i;

        if (!rdev->vm_manager.enabled)
                return;

        mutex_lock(&rdev->vm_manager.lock);
-       /* free all allocated page tables */
-       list_for_each_entry_safe(vm, tmp, &rdev->vm_manager.lru_vm, list) {
-               mutex_lock(&vm->mutex);
-               radeon_vm_free_pt(rdev, vm);
-               mutex_unlock(&vm->mutex);
-       }
-       for (i = 0; i < RADEON_NUM_VM; ++i) {
+       for (i = 0; i < RADEON_NUM_VM; ++i)
                radeon_fence_unref(&rdev->vm_manager.active[i]);
-       }
        radeon_asic_vm_fini(rdev);
-       mutex_unlock(&rdev->vm_manager.lock);
-
-       radeon_sa_bo_manager_suspend(rdev, &rdev->vm_manager.sa_manager);
-       radeon_sa_bo_manager_fini(rdev, &rdev->vm_manager.sa_manager);
        rdev->vm_manager.enabled = false;
-}
-
-/**
- * radeon_vm_evict - evict page table to make room for new one
- *
- * @rdev: radeon_device pointer
- * @vm: VM we want to allocate something for
- *
- * Evict a VM from the lru, making sure that it isn't @vm. (cayman+).
- * Returns 0 for success, -ENOMEM for failure.
- *
- * Global and local mutex must be locked!
- */
-static int radeon_vm_evict(struct radeon_device *rdev, struct radeon_vm *vm)
-{
-       struct radeon_vm *vm_evict;
-
-       if (list_empty(&rdev->vm_manager.lru_vm))
-               return -ENOMEM;
-
-       vm_evict = list_first_entry(&rdev->vm_manager.lru_vm,
-                                   struct radeon_vm, list);
-       if (vm_evict == vm)
-               return -ENOMEM;
-
-       mutex_lock(&vm_evict->mutex);
-       radeon_vm_free_pt(rdev, vm_evict);
-       mutex_unlock(&vm_evict->mutex);
-       return 0;
-}
-
-/**
- * radeon_vm_alloc_pt - allocates a page table for a VM
- *
- * @rdev: radeon_device pointer
- * @vm: vm to bind
- *
- * Allocate a page table for the requested vm (cayman+).
- * Returns 0 for success, error for failure.
- *
- * Global and local mutex must be locked!
- */
-int radeon_vm_alloc_pt(struct radeon_device *rdev, struct radeon_vm *vm)
-{
-       unsigned pts_size;
-
-       if (vm == NULL)
-               return -EINVAL;
-
-       if (vm->page_tables != NULL)
-               return 0;
-
-       /* allocate page table array */
-       pts_size = radeon_vm_num_pdes(rdev) * sizeof(struct radeon_sa_bo *);
-       vm->page_tables = kzalloc(pts_size, GFP_KERNEL);
-       if (vm->page_tables == NULL) {
-               DRM_ERROR("Cannot allocate memory for page table array\n");
-               return -ENOMEM;
-       }
-
-       return 0;
-}
-
-/**
- * radeon_vm_add_to_lru - add VMs page table to LRU list
- *
- * @rdev: radeon_device pointer
- * @vm: vm to add to LRU
- *
- * Add the allocated page table to the LRU list (cayman+).
- *
- * Global mutex must be locked!
- */
-void radeon_vm_add_to_lru(struct radeon_device *rdev, struct radeon_vm *vm)
-{
-       list_del_init(&vm->list);
-       list_add_tail(&vm->list, &rdev->vm_manager.lru_vm);
+       mutex_unlock(&rdev->vm_manager.lock);
 }

 /**
@@ -281,8 +129,10 @@ struct radeon_bo_list *radeon_vm_add_bos(struct radeon_device *rdev,
                                         struct list_head *head)
 {
        struct radeon_bo_list *list;
+       unsigned i, idx, size;

-       list = kmalloc(sizeof(struct radeon_bo_list), GFP_KERNEL);
+       size = (radeon_vm_num_pdes(rdev) + 1) * sizeof(struct radeon_bo_list);
+       list = kmalloc(size, GFP_KERNEL);
        if (!list)
                return NULL;

@@ -294,6 +144,18 @@ struct radeon_bo_list *radeon_vm_add_bos(struct radeon_device *rdev,
        list[0].tv.bo = &vm->page_directory->tbo;
        radeon_bo_list_add_object(&list[0], head);

+       for (i = 0, idx = 1; i < radeon_vm_num_pdes(rdev); i++) {
+               if (!vm->page_tables[i].bo)
+                       continue;
+
+               list[idx].bo = vm->page_tables[i].bo;
+               list[idx].written = 1;
+               list[idx].domain = RADEON_GEM_DOMAIN_VRAM;
+               list[idx].alt_domain = RADEON_GEM_DOMAIN_VRAM;
+               list[idx].tv.bo = &list[idx].bo->tbo;
+               radeon_bo_list_add_object(&list[idx++], head);
+       }
+
        return list;
 }

@@ -475,6 +337,63 @@ struct radeon_bo_va *radeon_vm_bo_add(struct radeon_device *rdev,
 }

 /**
+ * radeon_vm_clear_bo - initially clear the page dir/table
+ *
+ * @rdev: radeon_device pointer
+ * @bo: bo to clear
+ */
+static int radeon_vm_clear_bo(struct radeon_device *rdev,
+                             struct radeon_bo *bo)
+{
+       struct ttm_validate_buffer tv;
+       struct ww_acquire_ctx ticket;
+       struct list_head head;
+       struct radeon_ib ib;
+       unsigned entries;
+       uint64_t addr;
+       int r;
+
+       memset(&tv, 0, sizeof(tv));
+       tv.bo = &bo->tbo;
+
+       INIT_LIST_HEAD(&head);
+       list_add(&tv.head, &head);
+
+       r = ttm_eu_reserve_buffers(&ticket, &head);
+       if (r)
+               return r;
+
+       r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
+       if (r)
+               goto error;
+
+       addr = radeon_bo_gpu_offset(bo);
+       entries = radeon_bo_size(bo) / 8;
+
+       r = radeon_ib_get(rdev, R600_RING_TYPE_DMA_INDEX, &ib,
+                         NULL, entries * 2 + 64);
+       if (r)
+               goto error;
+
+       ib.length_dw = 0;
+
+       radeon_asic_vm_set_page(rdev, &ib, addr, 0, entries, 0, 0);
+
+       r = radeon_ib_schedule(rdev, &ib, NULL);
+       if (r)
+               goto error;
+
+       ttm_eu_fence_buffer_objects(&ticket, &head, ib.fence);
+       radeon_ib_free(rdev, &ib);
+
+       return 0;
+
+error:
+       ttm_eu_backoff_reservation(&ticket, &head);
+       return r;
+}
+
+/**
  * radeon_vm_bo_set_addr - set bos virtual address inside a vm
  *
  * @rdev: radeon_device pointer
@@ -498,7 +417,8 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
        struct radeon_vm *vm = bo_va->vm;
        struct radeon_bo_va *tmp;
        struct list_head *head;
-       unsigned last_pfn;
+       unsigned last_pfn, pt_idx;
+       int r;

        if (soffset) {
                /* make sure object fit at this offset */
@@ -549,8 +469,36 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
        bo_va->valid = false;
        list_move(&bo_va->vm_list, head);

+       soffset = (soffset / RADEON_GPU_PAGE_SIZE) >> RADEON_VM_BLOCK_SIZE;
+       eoffset = (eoffset / RADEON_GPU_PAGE_SIZE) >> RADEON_VM_BLOCK_SIZE;
+
+       radeon_bo_unreserve(bo_va->bo);
+
+       /* walk over the address space and allocate the page tables */
+       for (pt_idx = soffset; pt_idx <= eoffset; ++pt_idx) {
+               if (vm->page_tables[pt_idx].bo)
+                       continue;
+
+               vm->page_tables[pt_idx].addr = 0;
+               r = radeon_bo_create(rdev, RADEON_VM_PTE_COUNT * 8,
+                                    RADEON_GPU_PAGE_SIZE, false, 
+                                    RADEON_GEM_DOMAIN_VRAM, NULL,
+                                    &vm->page_tables[pt_idx].bo);
+               if (r)
+                       return r;
+
+               mutex_unlock(&vm->mutex);
+               r = radeon_vm_clear_bo(rdev, vm->page_tables[pt_idx].bo);
+               mutex_lock(&vm->mutex);
+               if (r) {
+                       radeon_bo_unref(&vm->page_tables[pt_idx].bo);
+                       vm->page_tables[pt_idx].bo = NULL;
+                       return r;
+               }
+       }
+
        mutex_unlock(&vm->mutex);
-       return 0;
+       return radeon_bo_reserve(bo_va->bo, false);
 }

 /**
@@ -620,37 +568,21 @@ static int radeon_vm_update_pdes(struct radeon_device *rdev,
        uint64_t last_pde = ~0, last_pt = ~0;
        unsigned count = 0;
        uint64_t pt_idx;
-       int r;

        start = (start / RADEON_GPU_PAGE_SIZE) >> RADEON_VM_BLOCK_SIZE;
        end = (end / RADEON_GPU_PAGE_SIZE) >> RADEON_VM_BLOCK_SIZE;

        /* walk over the address space and update the page directory */
        for (pt_idx = start; pt_idx <= end; ++pt_idx) {
-               uint64_t pde, pt;
+               struct radeon_bo *bo = vm->page_tables[pt_idx].bo;
+               uint64_t pde, pt = radeon_bo_gpu_offset(bo);

-               if (vm->page_tables[pt_idx])
+               if (vm->page_tables[pt_idx].addr == pt)
                        continue;
-
-retry:
-               r = radeon_sa_bo_new(rdev, &rdev->vm_manager.sa_manager,
-                                    &vm->page_tables[pt_idx],
-                                    RADEON_VM_PTE_COUNT * 8,
-                                    RADEON_GPU_PAGE_SIZE, false);
-
-               if (r == -ENOMEM) {
-                       r = radeon_vm_evict(rdev, vm);
-                       if (r)
-                               return r;
-                       goto retry;
-               } else if (r) {
-                       return r;
-               }
+               vm->page_tables[pt_idx].addr = pt;

                pde = radeon_bo_gpu_offset(vm->page_directory) + pt_idx * 8;

-               pt = radeon_sa_bo_gpu_addr(vm->page_tables[pt_idx]);
-
                if (((last_pde + 8 * count) != pde) ||
                    ((last_pt + incr * count) != pt)) {

@@ -658,10 +590,6 @@ retry:
                                radeon_asic_vm_set_page(rdev, ib, last_pde,
                                                        last_pt, count, incr,
                                                        R600_PTE_VALID);
-
-                               count *= RADEON_VM_PTE_COUNT;
-                               radeon_asic_vm_set_page(rdev, ib, last_pt, 0,
-                                                       count, 0, 0);
                        }

                        count = 1;
@@ -672,15 +600,10 @@ retry:
                }
        }

-       if (count) {
+       if (count)
                radeon_asic_vm_set_page(rdev, ib, last_pde, last_pt, count,
                                        incr, R600_PTE_VALID);

-               count *= RADEON_VM_PTE_COUNT;
-               radeon_asic_vm_set_page(rdev, ib, last_pt, 0,
-                                       count, 0, 0);
-       }
-
        return 0;
 }

@@ -724,7 +647,7 @@ static void radeon_vm_update_ptes(struct radeon_device *rdev,
                else
                        nptes = RADEON_VM_PTE_COUNT - (addr & mask);

-               pte = radeon_sa_bo_gpu_addr(vm->page_tables[pt_idx]);
+               pte = radeon_bo_gpu_offset(vm->page_tables[pt_idx].bo);
                pte += (addr & mask) * 8;

                if ((last_pte + 8 * count) != pte) {
@@ -778,10 +701,6 @@ int radeon_vm_bo_update(struct radeon_device *rdev,
        uint64_t addr;
        int r;

-       /* nothing to do if vm isn't bound */
-       if (vm->page_tables == NULL)
-               return 0;
-
        bo_va = radeon_vm_bo_find(vm, bo);
        if (bo_va == NULL) {
                dev_err(rdev->dev, "bo %p not in vm %p\n", bo, vm);
@@ -938,11 +857,7 @@ void radeon_vm_bo_invalidate(struct radeon_device *rdev,
  */
 int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
 {
-       unsigned pd_size, pd_entries;
-        struct ttm_validate_buffer tv;
-        struct ww_acquire_ctx ticket;
-        struct list_head head;
-       struct radeon_ib ib;
+       unsigned pd_size, pd_entries, pts_size;
        int r;

        vm->id = 0;
@@ -950,70 +865,33 @@ int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
        vm->last_flush = NULL;
        vm->last_id_use = NULL;
        mutex_init(&vm->mutex);
-       INIT_LIST_HEAD(&vm->list);
        INIT_LIST_HEAD(&vm->va);

        pd_size = radeon_vm_directory_size(rdev);
        pd_entries = radeon_vm_num_pdes(rdev);

+       /* allocate page table array */
+       pts_size = pd_entries * sizeof(struct radeon_vm_pt);
+       vm->page_tables = kzalloc(pts_size, GFP_KERNEL);
+       if (vm->page_tables == NULL) {
+               DRM_ERROR("Cannot allocate memory for page table array\n");
+               return -ENOMEM;
+       }
+
        r = radeon_bo_create(rdev, pd_size, RADEON_VM_PTB_ALIGN_SIZE, false,
                             RADEON_GEM_DOMAIN_VRAM, NULL,
                             &vm->page_directory);
        if (r)
                return r;

-       /* Initially clear the page directory */
-
-        memset(&tv, 0, sizeof(tv));
-        tv.bo = &vm->page_directory->tbo;
-
-        INIT_LIST_HEAD(&head);
-        list_add(&tv.head, &head);
-
-        r = ttm_eu_reserve_buffers(&ticket, &head);
-        if (r)
-                goto error_free;
-
-        r = ttm_bo_validate(&vm->page_directory->tbo,
-                           &vm->page_directory->placement,
-                           true, false);
-        if (r)
-                goto error_unreserve;
-
-       vm->pd_gpu_addr = radeon_bo_gpu_offset(vm->page_directory);
-
-       r = radeon_ib_get(rdev, R600_RING_TYPE_DMA_INDEX, &ib,
-                         NULL, pd_entries * 2 + 64);
-       if (r)
-                goto error_unreserve;
-
-       ib.length_dw = 0;
-
-       radeon_asic_vm_set_page(rdev, &ib, vm->pd_gpu_addr,
-                               0, pd_entries, 0, 0);
-
-       radeon_semaphore_sync_to(ib.semaphore, vm->last_id_use);
-       r = radeon_ib_schedule(rdev, &ib, NULL);
-       if (r)
-                goto error_unreserve;
-
-       ttm_eu_fence_buffer_objects(&ticket, &head, ib.fence);
-       radeon_fence_unref(&vm->fence);
-       vm->fence = radeon_fence_ref(ib.fence);
-       radeon_fence_unref(&vm->last_flush);
-
-       radeon_ib_free(rdev, &ib);
+       r = radeon_vm_clear_bo(rdev, vm->page_directory);
+       if (r) {
+               radeon_bo_unref(&vm->page_directory);
+               vm->page_directory = NULL;
+               return r;
+       }

        return 0;
-
-error_unreserve:
-       ttm_eu_backoff_reservation(&ticket, &head);
-
-error_free:
-       radeon_bo_unref(&vm->page_directory);
-       vm->page_directory = NULL;
-       return r;
-
 }

 /**
@@ -1028,12 +906,9 @@ error_free:
 void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
 {
        struct radeon_bo_va *bo_va, *tmp;
-       int r;
+       int i, r;

-       mutex_lock(&rdev->vm_manager.lock);
        mutex_lock(&vm->mutex);
-       radeon_vm_free_pt(rdev, vm);
-       mutex_unlock(&rdev->vm_manager.lock);

        if (!list_empty(&vm->va)) {
                dev_err(rdev->dev, "still active bo inside vm\n");
@@ -1047,10 +922,16 @@ void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
                        kfree(bo_va);
                }
        }
+
+       for (i = 0; i < radeon_vm_num_pdes(rdev); i++)
+               radeon_bo_unref(&vm->page_tables[i].bo);
+       kfree(vm->page_tables);
+
+       radeon_bo_unref(&vm->page_directory);
+
        radeon_fence_unref(&vm->fence);
        radeon_fence_unref(&vm->last_flush);
        radeon_fence_unref(&vm->last_id_use);
-       mutex_unlock(&vm->mutex);

-       radeon_bo_unref(&vm->page_directory);
+       mutex_unlock(&vm->mutex);
 }
-- 
1.8.3.2
