From: Marek Olšák <marek.ol...@amd.com>
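
gallium/radeon: derive r600_resource from threaded_resource

Replace struct u_resource with struct threaded_resource in r600_resource
and switch to the state that u_threaded_context expects: is_shared and
is_user_ptr live on threaded_resource, buffers are set up with
threaded_resource_init and torn down with threaded_resource_deinit, and
user-memory buffers also record their valid range in
threaded_resource::valid_buffer_range. The u_threaded_context.h include
moves from r600_buffer_common.c to r600_pipe_common.h. This prepares the
driver for switching to u_threaded_context.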

---
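Notes (not part of the commit message): for reviewers unfamiliar with
u_threaded_context, the threaded_resource members this patch starts using
are roughly the following. This is a sketch reconstructed only from the
fields the diff touches; the real definition in util/u_threaded_context.h
has additional members.

   struct threaded_resource {
      struct pipe_resource b;               /* accessed as rbuffer->b.b */
      const struct u_resource_vtbl *vtbl;   /* same role as u_resource::vtbl */
      struct util_range valid_buffer_range; /* mirrored into the driver's own
                                             * copy for user-memory buffers */
      bool is_shared;    /* replaces r600_resource::is_shared */
      bool is_user_ptr;  /* replaces the ws->buffer_is_user_ptr() query */
   };

   /* init/deinit pair called from r600_alloc_buffer_struct and
    * r600_buffer_destroy below */
   void threaded_resource_init(struct pipe_resource *res);
   void threaded_resource_deinit(struct pipe_resource *res);
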
 src/gallium/drivers/radeon/r600_buffer_common.c | 16 ++++++++++------
 src/gallium/drivers/radeon/r600_pipe_common.h   |  4 ++--
 src/gallium/drivers/radeon/r600_texture.c       | 20 ++++++++++----------
 3 files changed, 22 insertions(+), 18 deletions(-)

diff --git a/src/gallium/drivers/radeon/r600_buffer_common.c b/src/gallium/drivers/radeon/r600_buffer_common.c
index 5d984ea..b57632e 100644
--- a/src/gallium/drivers/radeon/r600_buffer_common.c
+++ b/src/gallium/drivers/radeon/r600_buffer_common.c
@@ -20,21 +20,20 @@
  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
  * USE OR OTHER DEALINGS IN THE SOFTWARE.
  *
  * Authors:
  *      Marek Olšák
  */
 
 #include "r600_cs.h"
 #include "util/u_memory.h"
 #include "util/u_upload_mgr.h"
-#include "util/u_threaded_context.h"
 #include <inttypes.h>
 #include <stdio.h>
 
 bool r600_rings_is_buffer_referenced(struct r600_common_context *ctx,
                                     struct pb_buffer *buf,
                                     enum radeon_bo_usage usage)
 {
        if (ctx->ws->cs_is_buffer_referenced(ctx->gfx.cs, buf, usage)) {
                return true;
        }
@@ -232,41 +231,42 @@ bool r600_alloc_resource(struct r600_common_screen *rscreen,
                        res->buf->size);
        }
        return true;
 }
 
 static void r600_buffer_destroy(struct pipe_screen *screen,
                                struct pipe_resource *buf)
 {
        struct r600_resource *rbuffer = r600_resource(buf);
 
+       threaded_resource_deinit(buf);
        util_range_destroy(&rbuffer->valid_buffer_range);
        pb_reference(&rbuffer->buf, NULL);
        FREE(rbuffer);
 }
 
 static bool
 r600_invalidate_buffer(struct r600_common_context *rctx,
                       struct r600_resource *rbuffer)
 {
        /* Shared buffers can't be reallocated. */
-       if (rbuffer->is_shared)
+       if (rbuffer->b.is_shared)
                return false;
 
        /* Sparse buffers can't be reallocated. */
        if (rbuffer->flags & RADEON_FLAG_SPARSE)
                return false;
 
        /* In AMD_pinned_memory, the user pointer association only gets
         * broken when the buffer is explicitly re-allocated.
         */
-       if (rctx->ws->buffer_is_user_ptr(rbuffer->buf))
+       if (rbuffer->b.is_user_ptr)
                return false;
 
        /* Check if mapping this buffer would cause waiting for the GPU. */
        if (r600_rings_is_buffer_referenced(rctx, rbuffer->buf, RADEON_USAGE_READWRITE) ||
            !rctx->ws->buffer_wait(rbuffer->buf, 0, RADEON_USAGE_READWRITE)) {
                rctx->invalidate_buffer(&rctx->b, &rbuffer->b.b);
        } else {
                util_range_set_empty(&rbuffer->valid_buffer_range);
        }
 
@@ -342,29 +342,29 @@ static void *r600_buffer_transfer_map(struct pipe_context *ctx,
         *
         *     4) Is glMapBuffer on a shared buffer guaranteed to return the
         *        same system address which was specified at creation time?
         *
         *        RESOLVED: NO. The GL implementation might return a different
         *        virtual mapping of that memory, although the same physical
         *        page will be used.
         *
         * So don't ever use staging buffers.
         */
-       if (rscreen->ws->buffer_is_user_ptr(rbuffer->buf))
+       if (rbuffer->b.is_user_ptr)
                usage |= PIPE_TRANSFER_PERSISTENT;
 
        /* See if the buffer range being mapped has never been initialized,
         * in which case it can be mapped unsynchronized. */
        if (!(usage & (PIPE_TRANSFER_UNSYNCHRONIZED |
                       TC_TRANSFER_MAP_IGNORE_VALID_RANGE)) &&
            usage & PIPE_TRANSFER_WRITE &&
-           !rbuffer->is_shared &&
+           !rbuffer->b.is_shared &&
            !util_ranges_intersect(&rbuffer->valid_buffer_range, box->x, box->x + box->width)) {
                usage |= PIPE_TRANSFER_UNSYNCHRONIZED;
        }
 
        /* If discarding the entire range, discard the whole resource instead. */
        if (usage & PIPE_TRANSFER_DISCARD_RANGE &&
            box->x == 0 && box->width == resource->width0) {
                usage |= PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE;
        }
 
@@ -558,25 +558,27 @@ r600_alloc_buffer_struct(struct pipe_screen *screen,
                         const struct pipe_resource *templ)
 {
        struct r600_resource *rbuffer;
 
        rbuffer = MALLOC_STRUCT(r600_resource);
 
        rbuffer->b.b = *templ;
        rbuffer->b.b.next = NULL;
        pipe_reference_init(&rbuffer->b.b.reference, 1);
        rbuffer->b.b.screen = screen;
+
        rbuffer->b.vtbl = &r600_buffer_vtbl;
+       threaded_resource_init(&rbuffer->b.b);
+
        rbuffer->buf = NULL;
        rbuffer->bind_history = 0;
        rbuffer->TC_L2_dirty = false;
-       rbuffer->is_shared = false;
        util_range_init(&rbuffer->valid_buffer_range);
        return rbuffer;
 }
 
 struct pipe_resource *r600_buffer_create(struct pipe_screen *screen,
                                         const struct pipe_resource *templ,
                                         unsigned alignment)
 {
        struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;
        struct r600_resource *rbuffer = r600_alloc_buffer_struct(screen, templ);
@@ -620,21 +622,23 @@ struct pipe_resource *
 r600_buffer_from_user_memory(struct pipe_screen *screen,
                             const struct pipe_resource *templ,
                             void *user_memory)
 {
        struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;
        struct radeon_winsys *ws = rscreen->ws;
        struct r600_resource *rbuffer = r600_alloc_buffer_struct(screen, templ);
 
        rbuffer->domains = RADEON_DOMAIN_GTT;
        rbuffer->flags = 0;
+       rbuffer->b.is_user_ptr = true;
        util_range_add(&rbuffer->valid_buffer_range, 0, templ->width0);
+       util_range_add(&rbuffer->b.valid_buffer_range, 0, templ->width0);
 
        /* Convert a user pointer to a buffer. */
        rbuffer->buf = ws->buffer_from_ptr(ws, user_memory, templ->width0);
        if (!rbuffer->buf) {
                FREE(rbuffer);
                return NULL;
        }
 
        if (rscreen->info.has_virtual_memory)
                rbuffer->gpu_address =
diff --git a/src/gallium/drivers/radeon/r600_pipe_common.h b/src/gallium/drivers/radeon/r600_pipe_common.h
index 51d797a..3449786 100644
--- a/src/gallium/drivers/radeon/r600_pipe_common.h
+++ b/src/gallium/drivers/radeon/r600_pipe_common.h
@@ -38,20 +38,21 @@
 
 #include "radeon/radeon_winsys.h"
 
 #include "util/disk_cache.h"
 #include "util/u_blitter.h"
 #include "util/list.h"
 #include "util/u_range.h"
 #include "util/slab.h"
 #include "util/u_suballoc.h"
 #include "util/u_transfer.h"
+#include "util/u_threaded_context.h"
 
 #define ATI_VENDOR_ID 0x1002
 
 #define R600_RESOURCE_FLAG_TRANSFER            (PIPE_RESOURCE_FLAG_DRV_PRIV << 0)
 #define R600_RESOURCE_FLAG_FLUSHED_DEPTH       (PIPE_RESOURCE_FLAG_DRV_PRIV << 1)
 #define R600_RESOURCE_FLAG_FORCE_TILING        (PIPE_RESOURCE_FLAG_DRV_PRIV << 2)
 #define R600_RESOURCE_FLAG_DISABLE_DCC         (PIPE_RESOURCE_FLAG_DRV_PRIV << 3)
 #define R600_RESOURCE_FLAG_UNMAPPABLE          (PIPE_RESOURCE_FLAG_DRV_PRIV << 4)
 
 #define R600_CONTEXT_STREAMOUT_FLUSH           (1u << 0)
@@ -133,21 +134,21 @@ struct r600_perfcounters;
 struct tgsi_shader_info;
 struct r600_qbo_state;
 
 void radeon_shader_binary_init(struct ac_shader_binary *b);
 void radeon_shader_binary_clean(struct ac_shader_binary *b);
 
 /* Only 32-bit buffer allocations are supported, gallium doesn't support more
  * at the moment.
  */
 struct r600_resource {
-       struct u_resource               b;
+       struct threaded_resource        b;
 
        /* Winsys objects. */
        struct pb_buffer                *buf;
        uint64_t                        gpu_address;
        /* Memory usage if the buffer placement is optimal. */
        uint64_t                        vram_usage;
        uint64_t                        gart_usage;
 
        /* Resource properties. */
        uint64_t                        bo_size;
@@ -172,21 +173,20 @@ struct r600_resource {
         * flag and flush the cache before using the buffer.
         *
         * For example, TC L2 must be flushed if a buffer which has been
         * modified by a shader store instruction is about to be used as
         * an index buffer. The reason is that VGT DMA index fetching doesn't
         * use TC L2.
         */
        bool                            TC_L2_dirty;
 
        /* Whether the resource has been exported via resource_get_handle. */
-       bool                            is_shared;
        unsigned                        external_usage; /* PIPE_HANDLE_USAGE_* */
 };
 
 struct r600_transfer {
        struct pipe_transfer            transfer;
        struct r600_resource            *staging;
        unsigned                        offset;
 };
 
 struct r600_fmask_info {
diff --git a/src/gallium/drivers/radeon/r600_texture.c b/src/gallium/drivers/radeon/r600_texture.c
index 449d7bc..3bcf966 100644
--- a/src/gallium/drivers/radeon/r600_texture.c
+++ b/src/gallium/drivers/radeon/r600_texture.c
@@ -382,21 +382,21 @@ static void r600_texture_discard_cmask(struct r600_common_screen *rscreen,
 
        /* Notify all contexts about the change. */
        p_atomic_inc(&rscreen->dirty_tex_counter);
        p_atomic_inc(&rscreen->compressed_colortex_counter);
 }
 
 static bool r600_can_disable_dcc(struct r600_texture *rtex)
 {
        /* We can't disable DCC if it can be written by another process. */
        return rtex->dcc_offset &&
-              (!rtex->resource.is_shared ||
+              (!rtex->resource.b.is_shared ||
                !(rtex->resource.external_usage & PIPE_HANDLE_USAGE_WRITE));
 }
 
 static bool r600_texture_discard_dcc(struct r600_common_screen *rscreen,
                                     struct r600_texture *rtex)
 {
        if (!r600_can_disable_dcc(rtex))
                return false;
 
        assert(rtex->dcc_separate_buffer == NULL);
@@ -459,21 +459,21 @@ static void r600_degrade_tile_mode_to_linear(struct r600_common_context *rctx,
        struct r600_texture *new_tex;
        struct pipe_resource templ = rtex->resource.b.b;
        unsigned i;
 
        templ.bind |= PIPE_BIND_LINEAR;
 
        /* r600g doesn't react to dirty_tex_descriptor_counter */
        if (rctx->chip_class < SI)
                return;
 
-       if (rtex->resource.is_shared ||
+       if (rtex->resource.b.is_shared ||
            rtex->surface.is_linear)
                return;
 
        /* This fails with MSAA, depth, and compressed textures. */
        if (r600_choose_tiling(rctx->screen, &templ) !=
            RADEON_SURF_MODE_LINEAR_ALIGNED)
                return;
 
        new_tex = (struct r600_texture*)screen->resource_create(screen, &templ);
        if (!new_tex)
@@ -563,39 +563,39 @@ static boolean r600_texture_get_handle(struct pipe_screen* screen,
                        r600_eliminate_fast_color_clear(rctx, rtex);
 
                        /* Disable CMASK if flush_resource isn't going
                         * to be called.
                         */
                        if (rtex->cmask.size)
                                r600_texture_discard_cmask(rscreen, rtex);
                }
 
                /* Set metadata. */
-               if (!res->is_shared || update_metadata) {
+               if (!res->b.is_shared || update_metadata) {
                        r600_texture_init_metadata(rscreen, rtex, &metadata);
                        if (rscreen->query_opaque_metadata)
                                rscreen->query_opaque_metadata(rscreen, rtex,
                                                               &metadata);
 
                        rscreen->ws->buffer_set_metadata(res->buf, &metadata);
                }
        }
 
-       if (res->is_shared) {
+       if (res->b.is_shared) {
                /* USAGE_EXPLICIT_FLUSH must be cleared if at least one user
                 * doesn't set it.
                 */
                res->external_usage |= usage & ~PIPE_HANDLE_USAGE_EXPLICIT_FLUSH;
                if (!(usage & PIPE_HANDLE_USAGE_EXPLICIT_FLUSH))
                        res->external_usage &= ~PIPE_HANDLE_USAGE_EXPLICIT_FLUSH;
        } else {
-               res->is_shared = true;
+               res->b.is_shared = true;
                res->external_usage = usage;
        }
 
        if (rscreen->chip_class >= GFX9) {
                offset = rtex->surface.u.gfx9.surf_offset;
                stride = rtex->surface.u.gfx9.surf_pitch *
                         rtex->surface.bpe;
                slice_size = rtex->surface.u.gfx9.surf_slice_size;
        } else {
                offset = rtex->surface.u.legacy.level[0].offset;
@@ -1425,21 +1425,21 @@ static struct pipe_resource *r600_texture_from_handle(struct pipe_screen *screen
        r = r600_init_surface(rscreen, &surface, templ, array_mode, stride,
                              offset, true, is_scanout, false, false);
        if (r) {
                return NULL;
        }
 
        rtex = r600_texture_create_object(screen, templ, buf, &surface);
        if (!rtex)
                return NULL;
 
-       rtex->resource.is_shared = true;
+       rtex->resource.b.is_shared = true;
        rtex->resource.external_usage = usage;
 
        if (rscreen->apply_opaque_metadata)
                rscreen->apply_opaque_metadata(rscreen, rtex, &metadata);
 
        /* Validate that addrlib arrived at the same surface parameters. */
        if (rscreen->chip_class >= GFX9) {
                assert(metadata.u.gfx9.swizzle_mode == surface.u.gfx9.surf.swizzle_mode);
        }
 
@@ -1543,21 +1543,21 @@ static void r600_init_temp_resource_from_box(struct pipe_resource *res,
        }
 }
 
 static bool r600_can_invalidate_texture(struct r600_common_screen *rscreen,
                                        struct r600_texture *rtex,
                                        unsigned transfer_usage,
                                        const struct pipe_box *box)
 {
        /* r600g doesn't react to dirty_tex_descriptor_counter */
        return rscreen->chip_class >= SI &&
-               !rtex->resource.is_shared &&
+               !rtex->resource.b.is_shared &&
                !(transfer_usage & PIPE_TRANSFER_READ) &&
                rtex->resource.b.b.last_level == 0 &&
                util_texrange_covers_whole_level(&rtex->resource.b.b, 0,
                                                 box->x, box->y, box->z,
                                                 box->width, box->height,
                                                 box->depth);
 }
 
 static void r600_texture_invalidate_storage(struct r600_common_context *rctx,
                                            struct r600_texture *rtex)
@@ -2250,21 +2250,21 @@ static bool vi_should_enable_separate_dcc(struct r600_texture *tex)
        return tex->ps_draw_ratio + tex->num_slow_clears >= 5;
 }
 
 /* Called by fast clear. */
 static void vi_separate_dcc_try_enable(struct r600_common_context *rctx,
                                       struct r600_texture *tex)
 {
        /* The intent is to use this with shared displayable back buffers,
         * but it's not strictly limited only to them.
         */
-       if (!tex->resource.is_shared ||
+       if (!tex->resource.b.is_shared ||
            !(tex->resource.external_usage & PIPE_HANDLE_USAGE_EXPLICIT_FLUSH) ||
            tex->resource.b.b.target != PIPE_TEXTURE_2D ||
            tex->resource.b.b.last_level > 0 ||
            !tex->surface.dcc_size)
                return;
 
        if (tex->dcc_offset)
                return; /* already enabled */
 
        /* Enable the DCC stat gathering. */
@@ -2517,21 +2517,21 @@ void vi_dcc_clear_level(struct r600_common_context *rctx,
                           clear_value, R600_COHERENCY_CB_META);
 }
 
 /* Set the same micro tile mode as the destination of the last MSAA resolve.
  * This allows hitting the MSAA resolve fast path, which requires that both
  * src and dst micro tile modes match.
  */
 static void si_set_optimal_micro_tile_mode(struct r600_common_screen *rscreen,
                                           struct r600_texture *rtex)
 {
-       if (rtex->resource.is_shared ||
+       if (rtex->resource.b.is_shared ||
            rtex->resource.b.b.nr_samples <= 1 ||
            rtex->surface.micro_tile_mode == rtex->last_msaa_resolve_target_micro_mode)
                return;
 
        assert(rscreen->chip_class >= GFX9 ||
               rtex->surface.u.legacy.level[0].mode == RADEON_SURF_MODE_2D);
        assert(rtex->resource.b.b.last_level == 0);
 
        if (rscreen->chip_class >= GFX9) {
                /* 4K or larger tiles only. 0 is linear. 1-3 are 256B tiles. */
@@ -2667,21 +2667,21 @@ void evergreen_do_fast_color_clear(struct r600_common_context *rctx,
 
                /* only supported on tiled surfaces */
                if (tex->surface.is_linear) {
                        continue;
                }
 
                /* shared textures can't use fast clear without an explicit flush,
                 * because there is no way to communicate the clear color among
                 * all clients
                 */
-               if (tex->resource.is_shared &&
+               if (tex->resource.b.is_shared &&
                    !(tex->resource.external_usage & PIPE_HANDLE_USAGE_EXPLICIT_FLUSH))
                        continue;
 
                /* fast color clear with 1D tiling doesn't work on old kernels and CIK */
                if (rctx->chip_class == CIK &&
                    tex->surface.u.legacy.level[0].mode == RADEON_SURF_MODE_1D &&
                    rctx->screen->info.drm_major == 2 &&
                    rctx->screen->info.drm_minor < 38) {
                        continue;
                }
-- 
2.7.4
