Looks better.

Reviewed-by: Samuel Pitoiset <samuel.pitoi...@gmail.com>

On 06/29/2017 09:47 PM, Marek Olšák wrote:
From: Marek Olšák <marek.ol...@amd.com>

---
  src/gallium/drivers/r300/r300_texture.c       | 2 +-
  src/gallium/drivers/radeon/r600_texture.c     | 2 +-
  src/gallium/drivers/radeon/radeon_winsys.h    | 2 +-
  src/gallium/winsys/amdgpu/drm/amdgpu_bo.c     | 6 +++---
  src/gallium/winsys/radeon/drm/radeon_drm_bo.c | 4 ++--
  src/gallium/winsys/radeon/drm/radeon_drm_cs.c | 2 +-
  6 files changed, 9 insertions(+), 9 deletions(-)

diff --git a/src/gallium/drivers/r300/r300_texture.c 
b/src/gallium/drivers/r300/r300_texture.c
index c202fbe..cdf9ccb 100644
--- a/src/gallium/drivers/r300/r300_texture.c
+++ b/src/gallium/drivers/r300/r300_texture.c
@@ -1112,21 +1112,21 @@ r300_texture_create_object(struct r300_screen *rscreen,
          tex->domain &= ~RADEON_DOMAIN_GTT;
      }
      /* Just fail if the texture is too large. */
      if (!tex->domain) {
          goto fail;
      }
/* Create the backing buffer if needed. */
      if (!tex->buf) {
          tex->buf = rws->buffer_create(rws, tex->tex.size_in_bytes, 2048,
-                                      tex->domain, RADEON_FLAG_HANDLE);
+                                      tex->domain, RADEON_FLAG_NO_SUBALLOC);
if (!tex->buf) {
              goto fail;
          }
      }
if (SCREEN_DBG_ON(rscreen, DBG_MSAA) && base->nr_samples > 1) {
          fprintf(stderr, "r300: %ix MSAA %s buffer created\n",
                  base->nr_samples,
                  util_format_is_depth_or_stencil(base->format) ? "depth" : 
"color");
diff --git a/src/gallium/drivers/radeon/r600_texture.c 
b/src/gallium/drivers/radeon/r600_texture.c
index 139ab13..c811d6a 100644
--- a/src/gallium/drivers/radeon/r600_texture.c
+++ b/src/gallium/drivers/radeon/r600_texture.c
@@ -1199,21 +1199,21 @@ r600_texture_create_object(struct pipe_screen *screen,
                        rtex->dcc_offset = align64(rtex->size, 
rtex->surface.dcc_alignment);
                        rtex->size = rtex->dcc_offset + rtex->surface.dcc_size;
                }
        }
/* Now create the backing buffer. */
        if (!buf) {
                r600_init_resource_fields(rscreen, resource, rtex->size,
                                          rtex->surface.surf_alignment);
- resource->flags |= RADEON_FLAG_HANDLE;
+               resource->flags |= RADEON_FLAG_NO_SUBALLOC;
if (!r600_alloc_resource(rscreen, resource)) {
                        FREE(rtex);
                        return NULL;
                }
        } else {
                resource->buf = buf;
                resource->gpu_address = 
rscreen->ws->buffer_get_virtual_address(resource->buf);
                resource->bo_size = buf->size;
                resource->bo_alignment = buf->alignment;
diff --git a/src/gallium/drivers/radeon/radeon_winsys.h 
b/src/gallium/drivers/radeon/radeon_winsys.h
index 247fff0..706188f 100644
--- a/src/gallium/drivers/radeon/radeon_winsys.h
+++ b/src/gallium/drivers/radeon/radeon_winsys.h
@@ -46,21 +46,21 @@ enum radeon_bo_layout {
  enum radeon_bo_domain { /* bitfield */
      RADEON_DOMAIN_GTT  = 2,
      RADEON_DOMAIN_VRAM = 4,
      RADEON_DOMAIN_VRAM_GTT = RADEON_DOMAIN_VRAM | RADEON_DOMAIN_GTT
  };
enum radeon_bo_flag { /* bitfield */
      RADEON_FLAG_GTT_WC =        (1 << 0),
      RADEON_FLAG_CPU_ACCESS =    (1 << 1),
      RADEON_FLAG_NO_CPU_ACCESS = (1 << 2),
-    RADEON_FLAG_HANDLE =        (1 << 3), /* the buffer must not be 
suballocated */
+    RADEON_FLAG_NO_SUBALLOC =   (1 << 3),
      RADEON_FLAG_SPARSE =        (1 << 4),
  };
enum radeon_bo_usage { /* bitfield */
      RADEON_USAGE_READ = 2,
      RADEON_USAGE_WRITE = 4,
      RADEON_USAGE_READWRITE = RADEON_USAGE_READ | RADEON_USAGE_WRITE,
/* The winsys ensures that the CS submission will be scheduled after
       * previously flushed CSs referencing this BO in a conflicting way.
diff --git a/src/gallium/winsys/amdgpu/drm/amdgpu_bo.c 
b/src/gallium/winsys/amdgpu/drm/amdgpu_bo.c
index 4017411..a86cc2c 100644
--- a/src/gallium/winsys/amdgpu/drm/amdgpu_bo.c
+++ b/src/gallium/winsys/amdgpu/drm/amdgpu_bo.c
@@ -683,21 +683,21 @@ sparse_backing_alloc(struct amdgpu_winsys_bo *bo, 
uint32_t *pstart_page, uint32_
assert(bo->u.sparse.num_backing_pages < DIV_ROUND_UP(bo->base.size, RADEON_SPARSE_PAGE_SIZE));

      size = MIN3(bo->base.size / 16,
                    8 * 1024 * 1024,
                    bo->base.size - (uint64_t)bo->u.sparse.num_backing_pages * 
RADEON_SPARSE_PAGE_SIZE);
        size = MAX2(size, RADEON_SPARSE_PAGE_SIZE);
buf = amdgpu_bo_create(&bo->ws->base, size, RADEON_SPARSE_PAGE_SIZE,
                               bo->initial_domain,
-                             bo->u.sparse.flags | RADEON_FLAG_HANDLE);
+                             bo->u.sparse.flags | RADEON_FLAG_NO_SUBALLOC);
        if (!buf) {
           FREE(best_backing->chunks);
           FREE(best_backing);
           return NULL;
        }
/* We might have gotten a bigger buffer than requested via caching. */
        pages = buf->size / RADEON_SPARSE_PAGE_SIZE;
best_backing->bo = amdgpu_winsys_bo(buf);
@@ -1149,21 +1149,21 @@ amdgpu_bo_create(struct radeon_winsys *rws,
                   uint64_t size,
                   unsigned alignment,
                   enum radeon_bo_domain domain,
                   enum radeon_bo_flag flags)
  {
     struct amdgpu_winsys *ws = amdgpu_winsys(rws);
     struct amdgpu_winsys_bo *bo;
     unsigned usage = 0, pb_cache_bucket;
/* Sub-allocate small buffers from slabs. */
-   if (!(flags & (RADEON_FLAG_HANDLE | RADEON_FLAG_SPARSE)) &&
+   if (!(flags & (RADEON_FLAG_NO_SUBALLOC | RADEON_FLAG_SPARSE)) &&
         size <= (1 << AMDGPU_SLAB_MAX_SIZE_LOG2) &&
         alignment <= MAX2(1 << AMDGPU_SLAB_MIN_SIZE_LOG2, 
util_next_power_of_two(size))) {
        struct pb_slab_entry *entry;
        unsigned heap = 0;
if (flags & RADEON_FLAG_GTT_WC)
           heap |= 1;
        if (flags & RADEON_FLAG_CPU_ACCESS)
           heap |= 2;
        if (flags & ~(RADEON_FLAG_GTT_WC | RADEON_FLAG_CPU_ACCESS))
@@ -1205,21 +1205,21 @@ no_slab:
     if (flags & RADEON_FLAG_SPARSE) {
        assert(RADEON_SPARSE_PAGE_SIZE % alignment == 0);
        assert(!(flags & RADEON_FLAG_CPU_ACCESS));
flags |= RADEON_FLAG_NO_CPU_ACCESS;

      return amdgpu_bo_sparse_create(ws, size, domain, flags);
     }
/* This flag is irrelevant for the cache. */
-   flags &= ~RADEON_FLAG_HANDLE;
+   flags &= ~RADEON_FLAG_NO_SUBALLOC;
/* Align size to page size. This is the minimum alignment for normal
      * BOs. Aligning this here helps the cached bufmgr. Especially small BOs,
      * like constant/uniform buffers, can benefit from better and more reuse.
      */
     size = align64(size, ws->info.gart_page_size);
     alignment = align(alignment, ws->info.gart_page_size);
/* Only set one usage bit each for domains and flags, or the cache manager
      * might consider different sets of domains / flags compatible
diff --git a/src/gallium/winsys/radeon/drm/radeon_drm_bo.c 
b/src/gallium/winsys/radeon/drm/radeon_drm_bo.c
index 9bbf1b3..18d397d 100644
--- a/src/gallium/winsys/radeon/drm/radeon_drm_bo.c
+++ b/src/gallium/winsys/radeon/drm/radeon_drm_bo.c
@@ -936,21 +936,21 @@ radeon_winsys_bo_create(struct radeon_winsys *rws,
      struct radeon_bo *bo;
      unsigned usage = 0, pb_cache_bucket;
assert(!(flags & RADEON_FLAG_SPARSE)); /* not supported */

    /* Only 32-bit sizes are supported. */
      if (size > UINT_MAX)
          return NULL;
/* Sub-allocate small buffers from slabs. */
-    if (!(flags & RADEON_FLAG_HANDLE) &&
+    if (!(flags & RADEON_FLAG_NO_SUBALLOC) &&
          size <= (1 << RADEON_SLAB_MAX_SIZE_LOG2) &&
          ws->info.has_virtual_memory &&
          alignment <= MAX2(1 << RADEON_SLAB_MIN_SIZE_LOG2, 
util_next_power_of_two(size))) {
          struct pb_slab_entry *entry;
          unsigned heap = 0;
if (flags & RADEON_FLAG_GTT_WC)
              heap |= 1;
          if (flags & RADEON_FLAG_CPU_ACCESS)
              heap |= 2;
@@ -984,21 +984,21 @@ radeon_winsys_bo_create(struct radeon_winsys *rws,
          bo = NULL;
          bo = container_of(entry, bo, u.slab.entry);
pipe_reference_init(&bo->base.reference, 1);

        return &bo->base;
      }
  no_slab:
/* This flag is irrelevant for the cache. */
-    flags &= ~RADEON_FLAG_HANDLE;
+    flags &= ~RADEON_FLAG_NO_SUBALLOC;
/* Align size to page size. This is the minimum alignment for normal
       * BOs. Aligning this here helps the cached bufmgr. Especially small BOs,
       * like constant/uniform buffers, can benefit from better and more reuse.
       */
      size = align(size, ws->info.gart_page_size);
      alignment = align(alignment, ws->info.gart_page_size);
/* Only set one usage bit each for domains and flags, or the cache manager
       * might consider different sets of domains / flags compatible
diff --git a/src/gallium/winsys/radeon/drm/radeon_drm_cs.c 
b/src/gallium/winsys/radeon/drm/radeon_drm_cs.c
index f59b539..5246053 100644
--- a/src/gallium/winsys/radeon/drm/radeon_drm_cs.c
+++ b/src/gallium/winsys/radeon/drm/radeon_drm_cs.c
@@ -744,21 +744,21 @@ static bool radeon_bo_is_referenced(struct 
radeon_winsys_cs *rcs,
  /* FENCES */
static struct pipe_fence_handle *
  radeon_cs_create_fence(struct radeon_winsys_cs *rcs)
  {
      struct radeon_drm_cs *cs = radeon_drm_cs(rcs);
      struct pb_buffer *fence;
/* Create a fence, which is a dummy BO. */
      fence = cs->ws->base.buffer_create(&cs->ws->base, 1, 1,
-                                       RADEON_DOMAIN_GTT, RADEON_FLAG_HANDLE);
+                                       RADEON_DOMAIN_GTT, 
RADEON_FLAG_NO_SUBALLOC);
      if (!fence)
         return NULL;
/* Add the fence as a dummy relocation. */
      cs->ws->base.cs_add_buffer(rcs, fence,
                                RADEON_USAGE_READWRITE, RADEON_DOMAIN_GTT,
                                RADEON_PRIO_FENCE);
      return (struct pipe_fence_handle*)fence;
  }
_______________________________________________
mesa-dev mailing list
mesa-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/mesa-dev

Reply via email to