From: Christian König <deathsimple@vodafone.de>

With this in place clients automatically block
until their memory request can be handled.

v2: block only if the memory request can't be satisfied
    on the first try; the first version actually lacked
    a night of sleep.

v3: make blocking optional, update comments and fix
    another bug with biggest hole tracking.

v4: drop debugfs portion

Signed-off-by: Christian König <deathsimple@vodafone.de>
---
 drivers/gpu/drm/radeon/radeon.h        |    5 +-
 drivers/gpu/drm/radeon/radeon_gart.c   |    2 +-
 drivers/gpu/drm/radeon/radeon_object.h |    2 +-
 drivers/gpu/drm/radeon/radeon_ring.c   |   21 ++--
 drivers/gpu/drm/radeon/radeon_sa.c     |  178 ++++++++++++++++++++++----------
 5 files changed, 139 insertions(+), 69 deletions(-)
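
Not part of the patch, just a note for reviewers: a minimal sketch of how
a caller could use the new "block" parameter, trying an opportunistic
non-blocking sub-allocation first and falling back to a blocking call only
when sleeping is allowed. The radeon_sa_bo_new() signature and the align
value of 256 are taken from this patch; the wrapper name is hypothetical.

int example_sa_alloc(struct radeon_device *rdev,
                     struct radeon_sa_manager *sa_manager,
                     struct radeon_sa_bo *sa_bo, unsigned size)
{
        int r;

        /* opportunistic attempt, fails with -ENOMEM if no hole fits */
        r = radeon_sa_bo_new(rdev, sa_manager, sa_bo, size, 256, false);
        if (r != -ENOMEM)
                return r;

        /* we may sleep: block until the biggest hole is large enough */
        return radeon_sa_bo_new(rdev, sa_manager, sa_bo, size, 256, true);
}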

diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 1aefbd9..415a496 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -381,17 +381,16 @@ struct radeon_bo_list {
  * alignment).
  */
 struct radeon_sa_manager {
-       spinlock_t              lock;
+       wait_queue_head_t       queue;
        struct radeon_bo        *bo;
        struct list_head        sa_bo;
        unsigned                size;
+       struct list_head        *biggest_hole;
        uint64_t                gpu_addr;
        void                    *cpu_ptr;
        uint32_t                domain;
 };

-struct radeon_sa_bo;
-
 /* sub-allocation buffer */
 struct radeon_sa_bo {
        struct list_head                list;
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index c58a036..7af4ff9 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -395,7 +395,7 @@ int radeon_vm_bind(struct radeon_device *rdev, struct radeon_vm *vm)
 retry:
        r = radeon_sa_bo_new(rdev, &rdev->vm_manager.sa_manager, &vm->sa_bo,
                             RADEON_GPU_PAGE_ALIGN(vm->last_pfn * 8),
-                            RADEON_GPU_PAGE_SIZE);
+                            RADEON_GPU_PAGE_SIZE, false);
        if (r) {
                if (list_empty(&rdev->vm_manager.lru_vm)) {
                        return r;
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h
index f9104be..a181c2f 100644
--- a/drivers/gpu/drm/radeon/radeon_object.h
+++ b/drivers/gpu/drm/radeon/radeon_object.h
@@ -158,7 +158,7 @@ extern int radeon_sa_bo_manager_suspend(struct radeon_device *rdev,
 extern int radeon_sa_bo_new(struct radeon_device *rdev,
                            struct radeon_sa_manager *sa_manager,
                            struct radeon_sa_bo *sa_bo,
-                           unsigned size, unsigned align);
+                           unsigned size, unsigned align, bool block);
 extern void radeon_sa_bo_free(struct radeon_device *rdev,
                              struct radeon_sa_bo *sa_bo);

diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index e394131..b06e04f 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -121,7 +121,7 @@ retry:
                if (rdev->ib_pool.ibs[idx].fence == NULL) {
                        r = radeon_sa_bo_new(rdev, &rdev->ib_pool.sa_manager,
                                             &rdev->ib_pool.ibs[idx].sa_bo,
-                                            size, 256);
+                                            size, 256, false);
                        if (!r) {
                                *ib = &rdev->ib_pool.ibs[idx];
                                (*ib)->ptr = rdev->ib_pool.sa_manager.cpu_ptr;
@@ -202,10 +202,17 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)

 int radeon_ib_pool_init(struct radeon_device *rdev)
 {
-       struct radeon_sa_manager tmp;
        int i, r;

-       r = radeon_sa_bo_manager_init(rdev, &tmp,
+       radeon_mutex_lock(&rdev->ib_pool.mutex);
+       if (rdev->ib_pool.ready) {
+               radeon_mutex_unlock(&rdev->ib_pool.mutex);
+               return 0;
+       }
+       rdev->ib_pool.ready = true;
+       radeon_mutex_unlock(&rdev->ib_pool.mutex);
+
+       r = radeon_sa_bo_manager_init(rdev, &rdev->ib_pool.sa_manager,
                                      RADEON_IB_POOL_SIZE*64*1024,
                                      RADEON_GEM_DOMAIN_GTT);
        if (r) {
@@ -213,14 +220,6 @@ int radeon_ib_pool_init(struct radeon_device *rdev)
        }

        radeon_mutex_lock(&rdev->ib_pool.mutex);
-       if (rdev->ib_pool.ready) {
-               radeon_mutex_unlock(&rdev->ib_pool.mutex);
-               radeon_sa_bo_manager_fini(rdev, &tmp);
-               return 0;
-       }
-
-       rdev->ib_pool.sa_manager = tmp;
-       INIT_LIST_HEAD(&rdev->ib_pool.sa_manager.sa_bo);
        for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
                rdev->ib_pool.ibs[i].fence = NULL;
                rdev->ib_pool.ibs[i].idx = i;
diff --git a/drivers/gpu/drm/radeon/radeon_sa.c b/drivers/gpu/drm/radeon/radeon_sa.c
index 4ce5c51..3212293 100644
--- a/drivers/gpu/drm/radeon/radeon_sa.c
+++ b/drivers/gpu/drm/radeon/radeon_sa.c
@@ -26,6 +26,7 @@
 /*
  * Authors:
  *    Jerome Glisse <glisse at freedesktop.org>
+ *    Christian König <christian.koenig@amd.com>
  */
 #include "drmP.h"
 #include "drm.h"
@@ -37,9 +38,10 @@ int radeon_sa_bo_manager_init(struct radeon_device *rdev,
 {
        int r;

-       spin_lock_init(&sa_manager->lock);
+       init_waitqueue_head(&sa_manager->queue);
        sa_manager->bo = NULL;
        sa_manager->size = size;
+       sa_manager->biggest_hole = &sa_manager->sa_bo;
        sa_manager->domain = domain;
        INIT_LIST_HEAD(&sa_manager->sa_bo);

@@ -58,6 +60,7 @@ void radeon_sa_bo_manager_fini(struct radeon_device *rdev,
 {
        struct radeon_sa_bo *sa_bo, *tmp;

+       wake_up_all(&sa_manager->queue);
        if (!list_empty(&sa_manager->sa_bo)) {
                dev_err(rdev->dev, "sa_manager is not empty, clearing anyway\n");
        }
@@ -114,81 +117,150 @@ int radeon_sa_bo_manager_suspend(struct radeon_device *rdev,
        return r;
 }

+static inline unsigned radeon_sa_bo_hole_start(struct radeon_sa_manager *m,
+                                              struct list_head *entry)
+{
+       struct radeon_sa_bo *sa_bo;
+
+       if (entry == &m->sa_bo)
+               return 0;
+
+       sa_bo = list_entry(entry, struct radeon_sa_bo, list);
+       return sa_bo->offset + sa_bo->size;
+}
+
+static inline unsigned radeon_sa_bo_hole_end(struct radeon_sa_manager *m,
+                                            struct list_head *entry)
+{
+       if (entry->next == &m->sa_bo)
+               return m->size;
+
+       return list_entry(entry->next, struct radeon_sa_bo, list)->offset;
+}
+
+static inline unsigned radeon_sa_bo_min_free(struct radeon_sa_manager *m,
+                                            unsigned align)
+{
+       unsigned start, end, wasted;
+       start = radeon_sa_bo_hole_start(m, m->biggest_hole);
+       wasted = start % align;
+       if (wasted)
+               start += align - wasted;
+
+       end = radeon_sa_bo_hole_end(m, m->biggest_hole);
+       return start < end ? end - start : 0;
+}
+
 /*
  * Principe is simple, we keep a list of sub allocation in offset
  * order (first entry has offset == 0, last entry has the highest
  * offset).
  *
- * When allocating new object we first check if there is room at
- * the end total_size - (last_object_offset + last_object_size) >=
- * alloc_size. If so we allocate new object there.
- *
- * When there is not enough room at the end, we start waiting for
- * each sub object until we reach object_offset+object_size >=
- * alloc_size, this object then become the sub object we return.
+ * When allocating new objects we start checking at what is currently
+ * assumed to be the biggest hole; if that's not big enough we continue
+ * searching the list until we find something big enough or reach the
+ * biggest hole again. If the latter happens we optionally block until
+ * the biggest hole has grown large enough.
  *
- * Alignment can't be bigger than page size
  */
 int radeon_sa_bo_new(struct radeon_device *rdev,
                     struct radeon_sa_manager *sa_manager,
                     struct radeon_sa_bo *sa_bo,
-                    unsigned size, unsigned align)
+                    unsigned size, unsigned align, bool block)
 {
-       struct radeon_sa_bo *tmp;
-       struct list_head *head;
-       unsigned offset = 0, wasted = 0;
-       unsigned long flags;
+       struct list_head *head, *curr, *hole;
+       unsigned start, currsize, wasted, holesize;
+       int r;

        BUG_ON(align > RADEON_GPU_PAGE_SIZE);
        BUG_ON(size > sa_manager->size);
-       spin_lock_irqsave(&sa_manager->lock, flags);

-       /* no one ? */
-       if (list_empty(&sa_manager->sa_bo)) {
-               head = &sa_manager->sa_bo;
-               goto out;
-       }
+       spin_lock_irq(&sa_manager->queue.lock);
+
+       do {
+               curr = head = hole = sa_manager->biggest_hole;
+               holesize = radeon_sa_bo_min_free(sa_manager, 1);
+               do {
+                       start = radeon_sa_bo_hole_start(sa_manager, curr);
+                       currsize = radeon_sa_bo_hole_end(sa_manager, curr) - start;
+
+                       wasted = start % align;
+                       if (wasted) {
+                               wasted = align - wasted;
+                       }
+
+                       /* room after current big enough ? */
+                       if (currsize >= (size + wasted)) {
+                               sa_bo->manager = sa_manager;
+                               sa_bo->offset = start + wasted;
+                               sa_bo->size = size;
+                               list_add(&sa_bo->list, curr);
+
+                               /* consider the space left after the newly
+                                  added sa_bo as the biggest hole */
+                               currsize -= (size + wasted);
+                               if (hole == sa_bo->list.prev || holesize < currsize) {
+                                       hole = &sa_bo->list;
+                               }
+
+                               if (sa_manager->biggest_hole != hole) {
+                                       sa_manager->biggest_hole = hole;
+                                       wake_up_locked(&sa_manager->queue);
+                               }
+                               spin_unlock_irq(&sa_manager->queue.lock);
+                               return 0;
+                       }
+
+                       if (holesize < currsize) {
+                               hole = curr;
+                               holesize = currsize;
+                       }
+
+                       curr = curr->next;
+               } while (curr != head);

-       /* look for a hole big enough */
-       list_for_each_entry(tmp, &sa_manager->sa_bo, list) {
-               /* room before this object ? */
-               if (offset < tmp->offset && (tmp->offset - offset) >= size) {
-                       head = tmp->list.prev;
-                       goto out;
+               if (sa_manager->biggest_hole != hole) {
+                       sa_manager->biggest_hole = hole;
                }
-               offset = tmp->offset + tmp->size;
-               wasted = offset % align;
-               if (wasted) {
-                       offset += align - wasted;
+
+               if (block) {
+                       /* failed to find something big enough, wait
+                          for the biggest hole to increase in size */
+                       r = wait_event_interruptible_locked_irq(sa_manager->queue,
+                               radeon_sa_bo_min_free(sa_manager, align) >= size
+                       );
+                       if (r) {
+                               spin_unlock_irq(&sa_manager->queue.lock);
+                               return r;
+                       }
                }
-       }
-       /* room at the end ? */
-       head = sa_manager->sa_bo.prev;
-       tmp = list_entry(head, struct radeon_sa_bo, list);
-       offset = tmp->offset + tmp->size;
-       wasted = offset % align;
-       if (wasted) {
-               offset += wasted = align - wasted;
-       }
-       if ((sa_manager->size - offset) < size) {
-               /* failed to find somethings big enough */
-               spin_unlock_irqrestore(&sa_manager->lock, flags);
-               return -ENOMEM;
-       }
+       } while (block);
+       spin_unlock_irq(&sa_manager->queue.lock);

-out:
-       sa_bo->manager = sa_manager;
-       sa_bo->offset = offset;
-       sa_bo->size = size;
-       list_add(&sa_bo->list, head);
-       spin_unlock_irqrestore(&sa_manager->lock, flags);
-       return 0;
+       return -ENOMEM;
 }

 void radeon_sa_bo_free(struct radeon_device *rdev, struct radeon_sa_bo *sa_bo)
 {
+       struct radeon_sa_manager *sa_manager = sa_bo->manager;
+       unsigned bsize, fsize;
        unsigned long flags;
-       spin_lock_irqsave(&sa_bo->manager->lock, flags);
+
+       spin_lock_irqsave(&sa_manager->queue.lock, flags);
+       if (&sa_bo->list == sa_manager->biggest_hole ||
+           sa_bo->list.prev == sa_manager->biggest_hole) {
+
+               sa_manager->biggest_hole = sa_bo->list.prev;
+               wake_up_locked(&sa_manager->queue);
+       } else {
+               bsize = radeon_sa_bo_min_free(sa_manager, 1);
+               fsize = radeon_sa_bo_hole_start(sa_manager, sa_bo->list.prev);
+               fsize = radeon_sa_bo_hole_end(sa_manager, &sa_bo->list) - fsize;
+               if (fsize > bsize) {
+                       sa_manager->biggest_hole = sa_bo->list.prev;
+                       wake_up_locked(&sa_manager->queue);
+               }
+       }
        list_del_init(&sa_bo->list);
-       spin_unlock_irqrestore(&sa_bo->manager->lock, flags);
+       spin_unlock_irqrestore(&sa_manager->queue.lock, flags);
 }
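
As an aside for reviewers: the locking change above relies on the "locked"
waitqueue pattern, where the wait_queue_head_t's own spinlock replaces the
old per-manager spinlock, so checking the condition, sleeping and waking
are all serialized by queue.lock. Below is a self-contained sketch of that
pattern with illustrative names (not driver code), assuming free_space is
only ever touched with queue.lock held:

#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/types.h>

static DECLARE_WAIT_QUEUE_HEAD(queue);
static unsigned free_space;     /* guarded by queue.lock */

static int consume(unsigned size, bool block)
{
        int r = 0;

        spin_lock_irq(&queue.lock);
        if (free_space < size) {
                if (!block) {
                        spin_unlock_irq(&queue.lock);
                        return -ENOMEM;
                }
                /* drops queue.lock while sleeping, retakes it on wakeup */
                r = wait_event_interruptible_locked_irq(queue,
                                                        free_space >= size);
        }
        if (!r)
                free_space -= size;
        spin_unlock_irq(&queue.lock);
        return r;
}

static void release(unsigned size)
{
        unsigned long flags;

        spin_lock_irqsave(&queue.lock, flags);
        free_space += size;
        wake_up_locked(&queue); /* we already hold queue.lock */
        spin_unlock_irqrestore(&queue.lock, flags);
}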
-- 
1.7.7.6
