From: Jerome Glisse <jgli...@redhat.com>

A wait queue is used for the case where we want to block until something
is freed that allows the allocation to succeed.
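
For reference, the blocking path boils down to the usual wait queue
pattern: the allocator sleeps on the queue when no hole is big enough,
and the free path wakes it up once space is returned. A minimal sketch
(try_alloc() and biggest_free_hole() are illustrative placeholders, not
the actual driver helpers):

  /* illustrative sketch only, not the exact driver code */
  static int sa_alloc(struct radeon_sa_manager *sa_manager,
                      unsigned size, unsigned align, bool block)
  {
          int r;

  retry:
          mutex_lock(&sa_manager->mutex);
          if (!try_alloc(sa_manager, size, align)) {
                  /* found a hole, allocation recorded under the lock */
                  mutex_unlock(&sa_manager->mutex);
                  return 0;
          }
          mutex_unlock(&sa_manager->mutex);

          if (!block)
                  return -ENOMEM;

          /* sleep until a free makes the biggest hole large enough */
          r = wait_event_interruptible(sa_manager->queue,
                          biggest_free_hole(sa_manager, align) >= size);
          if (r)
                  return r;
          goto retry;
  }

The free side is then expected to return its range to the manager and
call wake_up_all(&sa_manager->queue) so that any blocked allocator
rechecks the condition.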

Signed-off-by: Christian König <deathsimple at vodafone.de>
Signed-off-by: Jerome Glisse <jglisse at redhat.com>
---
 drivers/gpu/drm/radeon/radeon.h        |    1 +
 drivers/gpu/drm/radeon/radeon_gart.c   |    2 +-
 drivers/gpu/drm/radeon/radeon_object.h |    2 +-
 drivers/gpu/drm/radeon/radeon_ring.c   |    6 +++++-
 drivers/gpu/drm/radeon/radeon_sa.c     |   33 +++++++++++++++++++++++++++++++--
 5 files changed, 39 insertions(+), 5 deletions(-)

diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index ad12ef8..8a6dd46 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -382,6 +382,7 @@ struct radeon_bo_list {
  */
 struct radeon_sa_manager {
        struct mutex            mutex;
+       wait_queue_head_t       queue;
        struct radeon_bo        *bo;
        struct list_head        sa_bo;
        unsigned                size;
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index c58a036..7af4ff9 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -395,7 +395,7 @@ int radeon_vm_bind(struct radeon_device *rdev, struct radeon_vm *vm)
 retry:
        r = radeon_sa_bo_new(rdev, &rdev->vm_manager.sa_manager, &vm->sa_bo,
                             RADEON_GPU_PAGE_ALIGN(vm->last_pfn * 8),
-                            RADEON_GPU_PAGE_SIZE);
+                            RADEON_GPU_PAGE_SIZE, false);
        if (r) {
                if (list_empty(&rdev->vm_manager.lru_vm)) {
                        return r;
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h
index d9b9333..85f33d9 100644
--- a/drivers/gpu/drm/radeon/radeon_object.h
+++ b/drivers/gpu/drm/radeon/radeon_object.h
@@ -158,7 +158,7 @@ extern int radeon_sa_bo_manager_suspend(struct radeon_device *rdev,
 extern int radeon_sa_bo_new(struct radeon_device *rdev,
                            struct radeon_sa_manager *sa_manager,
                            struct radeon_sa_bo *sa_bo,
-                           unsigned size, unsigned align);
+                           unsigned size, unsigned align, bool block);
 extern void radeon_sa_bo_free(struct radeon_device *rdev,
                              struct radeon_sa_bo *sa_bo);
 #if defined(CONFIG_DEBUG_FS)
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index 1d9bce9..f8ecb3e 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -124,7 +124,7 @@ retry:
                if (rdev->ib_pool.ibs[idx].fence == NULL) {
                        r = radeon_sa_bo_new(rdev, &rdev->ib_pool.sa_manager,
                                             &rdev->ib_pool.ibs[idx].sa_bo,
-                                            size, 256);
+                                            size, 256, false);
                        if (!r) {
                                *ib = &rdev->ib_pool.ibs[idx];
                                (*ib)->ptr = rdev->ib_pool.sa_manager.cpu_ptr;
@@ -222,6 +222,10 @@ int radeon_ib_pool_init(struct radeon_device *rdev)
                return 0;
        }

+       /* Copy over the sa_manager.  We have to do this to keep the
+        * kernel's possible deadlock detection code from reporting a
+        * false positive.
+        */
        rdev->ib_pool.sa_manager = tmp;
        INIT_LIST_HEAD(&rdev->ib_pool.sa_manager.sa_bo);
        for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
diff --git a/drivers/gpu/drm/radeon/radeon_sa.c b/drivers/gpu/drm/radeon/radeon_sa.c
index 8c0b3e6..f7b20b1 100644
--- a/drivers/gpu/drm/radeon/radeon_sa.c
+++ b/drivers/gpu/drm/radeon/radeon_sa.c
@@ -38,6 +38,7 @@ int radeon_sa_bo_manager_init(struct radeon_device *rdev,
        int r;

        mutex_init(&sa_manager->mutex);
+       init_waitqueue_head(&sa_manager->queue);
        sa_manager->bo = NULL;
        sa_manager->size = size;
        sa_manager->shole_size = 0;
@@ -61,6 +62,8 @@ void radeon_sa_bo_manager_fini(struct radeon_device *rdev,
 {
        struct radeon_sa_bo *sa_bo, *tmp;

+       wake_up_all(&sa_manager->queue);
+       mutex_lock(&sa_manager->mutex);
        if (!list_empty(&sa_manager->sa_bo)) {
                dev_err(rdev->dev, "sa_manager is not empty, clearing anyway\n");
        }
@@ -69,6 +72,7 @@ void radeon_sa_bo_manager_fini(struct radeon_device *rdev,
        }
        radeon_bo_unref(&sa_manager->bo);
        sa_manager->size = 0;
+       mutex_unlock(&sa_manager->mutex);
 }

 int radeon_sa_bo_manager_start(struct radeon_device *rdev,
@@ -117,6 +121,17 @@ int radeon_sa_bo_manager_suspend(struct radeon_device *rdev,
        return r;
 }

+static inline unsigned radeon_sa_bo_max_free(struct radeon_sa_manager *sa_manager,
+                                             unsigned align)
+{
+       unsigned start, end, wasted;
+
+       wasted = (align - (sa_manager->ehole_offset % align)) % align;
+       end = wasted > sa_manager->ehole_size ? 0 : sa_manager->ehole_size - wasted;
+       start = sa_manager->shole_size;
+       return start < end ? end : start;
+}
+
 /*
  * Principe is simple, we keep a list of sub allocation in offset
  * order (first entry has offset == 0, last entry has the highest
@@ -135,13 +150,15 @@ int radeon_sa_bo_manager_suspend(struct radeon_device *rdev,
 int radeon_sa_bo_new(struct radeon_device *rdev,
                     struct radeon_sa_manager *sa_manager,
                     struct radeon_sa_bo *sa_bo,
-                    unsigned size, unsigned align)
+                    unsigned size, unsigned align, bool block)
 {
        unsigned offset = 0, wasted = 0;
+       int r;

        BUG_ON(align > RADEON_GPU_PAGE_SIZE);
        BUG_ON(size > sa_manager->size);

+retry:
        mutex_lock(&sa_manager->mutex);

        wasted = (align - (sa_manager->ehole_offset % align)) % align;
@@ -161,8 +178,20 @@ int radeon_sa_bo_new(struct radeon_device *rdev,
                offset = 0;
                goto out;
        }
-
        mutex_unlock(&sa_manager->mutex);
+
+       if (block) {
+               /* failed to find something big enough, wait
+                * for the biggest hole to increase in size
+                */
+               r = wait_event_interruptible(sa_manager->queue,
+                               radeon_sa_bo_max_free(sa_manager, align) >= size);
+               if (r) {
+                       return r;
+               }
+               goto retry;
+       }
+
        return -ENOMEM;

 out:
-- 
1.7.7.6
