From: Christian König <christian.koe...@amd.com>

Add a new RADEON_CHUNK_ID_WAIT_FOR CS chunk so that userspace can name
the fence sequence numbers it wants to synchronize with explicitly, and
track per-BO ownership so that the driver falls back to implicit
synchronization as soon as buffers move between clients or are moved
by TTM.

Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/gpu/drm/radeon/radeon.h     |  5 ++++
 drivers/gpu/drm/radeon/radeon_cs.c  | 49 ++++++++++++++++++++++++++++++++++---
 drivers/gpu/drm/radeon/radeon_gem.c |  1 +
 drivers/gpu/drm/radeon/radeon_ttm.c |  2 ++
 include/uapi/drm/radeon_drm.h       |  7 +++---
 5 files changed, 57 insertions(+), 7 deletions(-)
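
For illustration only, not part of the patch: a rough userspace sketch
of how a CS submission could attach the new RADEON_CHUNK_ID_WAIT_FOR
chunk. The helper name, the dword packing of the 64-bit sequence IDs
and the way those IDs are obtained are assumptions of this sketch and
not defined by this RFC; error handling is omitted.

    /* Hypothetical userspace sketch: submit an IB plus reloc list
     * together with an explicit wait-for list of 64-bit sequence IDs. */
    #include <stdint.h>
    #include <string.h>
    #include <xf86drm.h>
    #include <radeon_drm.h>

    #ifndef RADEON_CHUNK_ID_WAIT_FOR
    #define RADEON_CHUNK_ID_WAIT_FOR 0x05 /* added by this patch */
    #endif

    static int submit_with_explicit_sync(int fd,
                                         uint32_t *ib, uint32_t ib_dw,
                                         struct drm_radeon_cs_reloc *relocs,
                                         uint32_t nrelocs,
                                         uint64_t *wait_ids, uint32_t nids)
    {
            struct drm_radeon_cs_chunk chunks[3];
            uint64_t chunk_ptrs[3];
            struct drm_radeon_cs cs;

            chunks[0].chunk_id = RADEON_CHUNK_ID_IB;
            chunks[0].length_dw = ib_dw;
            chunks[0].chunk_data = (uintptr_t)ib;

            chunks[1].chunk_id = RADEON_CHUNK_ID_RELOCS;
            chunks[1].length_dw = nrelocs * sizeof(*relocs) / 4;
            chunks[1].chunk_data = (uintptr_t)relocs;

            /* Wait-for list: 64-bit sequence numbers, length given in
             * dwords like the other chunks (an assumption of this
             * sketch, the RFC does not spell out the packing). */
            chunks[2].chunk_id = RADEON_CHUNK_ID_WAIT_FOR;
            chunks[2].length_dw = nids * 2;
            chunks[2].chunk_data = (uintptr_t)wait_ids;

            chunk_ptrs[0] = (uintptr_t)&chunks[0];
            chunk_ptrs[1] = (uintptr_t)&chunks[1];
            chunk_ptrs[2] = (uintptr_t)&chunks[2];

            memset(&cs, 0, sizeof(cs));
            cs.num_chunks = 3;
            cs.chunks = (uintptr_t)chunk_ptrs;

            return drmCommandWriteRead(fd, DRM_RADEON_CS, &cs, sizeof(cs));
    }

Even when this chunk is supplied, buffers whose owner no longer matches
the submitting client, or that TTM has moved, are still synchronized
implicitly; see the radeon_cs_sync_rings() hunk below.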

diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index dbfd346..44a8eec 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -474,6 +474,9 @@ struct radeon_bo_va {
        struct radeon_bo                *bo;
 };

+#define RADEON_BO_OWNER_IMPLICIT_SYNC  (0l)
+#define RADEON_BO_OWNER_FIRST_CS       (~0l)
+
 struct radeon_bo {
        /* Protected by gem.mutex */
        struct list_head                list;
@@ -489,6 +492,7 @@ struct radeon_bo {
        u32                             tiling_flags;
        u32                             pitch;
        int                             surface_reg;
+       long                            owner;
        /* list of all virtual address to which this bo
         * is associated to
         */
@@ -1084,6 +1088,7 @@ struct radeon_cs_parser {
        struct radeon_cs_chunk  *chunk_relocs;
        struct radeon_cs_chunk  *chunk_flags;
        struct radeon_cs_chunk  *chunk_const_ib;
+       struct radeon_cs_chunk  *chunk_wait_for;
        struct radeon_ib        ib;
        struct radeon_ib        const_ib;
        void                    *track;
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 0c0f0b3..8f62698 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -183,7 +183,8 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
                }

                p->relocs[i].tv.bo = &p->relocs[i].robj->tbo;
-               p->relocs[i].tv.shared = !r->write_domain;
+               p->relocs[i].tv.shared = !r->write_domain ||
+                                        !!p->chunk_wait_for;
                p->relocs[i].handle = r->handle;

                radeon_cs_buckets_add(&buckets, &p->relocs[i].tv.head,
@@ -251,16 +252,40 @@ static int radeon_cs_get_ring(struct radeon_cs_parser *p, u32 ring, s32 priority

 static void radeon_cs_sync_rings(struct radeon_cs_parser *p)
 {
+       long owner = p->chunk_wait_for ? (long)p->filp :
+               RADEON_BO_OWNER_IMPLICIT_SYNC;
        int i;

+       if (p->chunk_wait_for) {
+               struct radeon_fpriv *fpriv = p->filp->driver_priv;
+
+               for (i = 0; i < p->chunk_wait_for->length_dw; ++i) {
+                       struct radeon_fence *fence;
+                       uint64_t *id;
+
+                       id = (uint64_t *)&p->chunk_wait_for->kdata[i];
+
+                       mutex_lock(&fpriv->seq_lock);
+                       fence = radeon_seq_query(fpriv->seq, *id);
+                       mutex_unlock(&fpriv->seq_lock);
+
+                       radeon_sync_fence(&p->ib.sync, fence);
+               }
+       }
+
        for (i = 0; i < p->nrelocs; i++) {
+               struct radeon_cs_reloc *reloc = &p->relocs[i];
                struct reservation_object *resv;

-               if (!p->relocs[i].robj)
+               if (!reloc->robj)
                        continue;

-               resv = p->relocs[i].robj->tbo.resv;
-               radeon_sync_resv(&p->ib.sync, resv, p->relocs[i].tv.shared);
+               if (reloc->robj->owner != owner &&
+                   reloc->robj->owner != RADEON_BO_OWNER_FIRST_CS)
+                       reloc->tv.shared = false;
+
+               resv = reloc->robj->tbo.resv;
+               radeon_sync_resv(&p->ib.sync, resv, reloc->tv.shared);
        }
 }

@@ -332,6 +357,10 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
                        if (p->chunks[i].length_dw == 0)
                                return -EINVAL;
                }
+               if (user_chunk.chunk_id == RADEON_CHUNK_ID_WAIT_FOR) {
+                       p->chunk_wait_for = &p->chunks[i];
+                       /* zero length wait for list is actually useful */
+               }

                size = p->chunks[i].length_dw;
                cdata = (void __user *)(unsigned long)user_chunk.chunk_data;
@@ -413,6 +442,18 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error, bo
        unsigned i;

        if (!error) {
+               long owner = parser->chunk_wait_for ? (long)parser->filp :
+                       RADEON_BO_OWNER_IMPLICIT_SYNC;
+
+               for (i = 0; i < parser->nrelocs; i++) {
+                       struct radeon_cs_reloc *reloc = &parser->relocs[i];
+
+                       if (!reloc->robj)
+                               continue;
+
+                       reloc->robj->owner = owner;
+               }
+
                if (parser->chunk_flags &&
                    parser->chunk_flags->length_dw > 4) {
                        struct radeon_fpriv *fpriv = parser->filp->driver_priv;
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index c100aa6..fa6efff 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -81,6 +81,7 @@ retry:
        }
        *obj = &robj->gem_base;
        robj->pid = task_pid_nr(current);
+       robj->owner = RADEON_BO_OWNER_FIRST_CS;

        mutex_lock(&rdev->gem.mutex);
        list_add_tail(&robj->list, &rdev->gem.objects);
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index eca2ce6..52df6d9 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -230,6 +230,7 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
                        struct ttm_mem_reg *new_mem,
                        struct ttm_mem_reg *old_mem)
 {
+       struct radeon_bo *rbo = container_of(bo, struct radeon_bo, tbo);
        struct radeon_device *rdev;
        uint64_t old_start, new_start;
        struct radeon_fence *fence;
@@ -275,6 +276,7 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
        if (IS_ERR(fence))
                return PTR_ERR(fence);

+       rbo->owner = RADEON_BO_OWNER_IMPLICIT_SYNC;
        r = ttm_bo_move_accel_cleanup(bo, &fence->base,
                                      evict, no_wait_gpu, new_mem);
        radeon_fence_unref(&fence);
diff --git a/include/uapi/drm/radeon_drm.h b/include/uapi/drm/radeon_drm.h
index 6b2b2e7..a34e3db 100644
--- a/include/uapi/drm/radeon_drm.h
+++ b/include/uapi/drm/radeon_drm.h
@@ -942,10 +942,11 @@ struct drm_radeon_gem_va {
        uint64_t                offset;
 };

-#define RADEON_CHUNK_ID_RELOCS 0x01
-#define RADEON_CHUNK_ID_IB     0x02
-#define RADEON_CHUNK_ID_FLAGS  0x03
+#define RADEON_CHUNK_ID_RELOCS         0x01
+#define RADEON_CHUNK_ID_IB             0x02
+#define RADEON_CHUNK_ID_FLAGS          0x03
 #define RADEON_CHUNK_ID_CONST_IB       0x04
+#define RADEON_CHUNK_ID_WAIT_FOR       0x05

 /* The first dword of RADEON_CHUNK_ID_FLAGS is a uint32 of these flags: */
 #define RADEON_CS_KEEP_TILING_FLAGS 0x01
-- 
1.9.1
