From: Marek Olšák <marek.ol...@amd.com>

---
 src/gallium/drivers/radeonsi/si_cp_dma.c | 62 ++++++++++++++----------
 src/gallium/drivers/radeonsi/si_pipe.h   |  5 ++
 2 files changed, 41 insertions(+), 26 deletions(-)
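For context, the rule the new helper in the diff below encodes is: CP DMA
traffic should stay in the TC L2 cache exactly when the eventual consumer
reads through L2 (shaders on CIK+, CB metadata on GFX9+); SI has no such
path, hence the L2_BYPASS asserts. A minimal, self-contained sketch of that
selection logic follows — illustration only, not driver code; the chip_class
enum is a simplified stand-in for the driver's (only its ordering matters):

  /* cache_policy_sketch.c — standalone illustration; build with:
   *   cc -Wall -o sketch cache_policy_sketch.c && ./sketch
   * The two policy enums mirror the patch; chip_class is simplified. */
  #include <stdio.h>

  enum chip_class { SI, CIK, VI, GFX9 };

  enum si_cache_policy {
          L2_BYPASS,
          L2_LRU, /* same as SLC=0 */
  };

  enum si_coherency {
          SI_COHERENCY_NONE,
          SI_COHERENCY_SHADER,
          SI_COHERENCY_CB_META,
  };

  /* Mirrors get_cache_policy() from the patch: keep CP DMA traffic in L2
   * exactly when the eventual consumer reads through L2. */
  static enum si_cache_policy get_cache_policy(enum chip_class chip,
                                               enum si_coherency coher)
  {
          if ((chip >= GFX9 && coher == SI_COHERENCY_CB_META) ||
              (chip >= CIK && coher == SI_COHERENCY_SHADER))
                  return L2_LRU;

          return L2_BYPASS;
  }

  int main(void)
  {
          /* SI has no TC L2 path for CP DMA, so it always bypasses L2. */
          printf("SI,   shader:  %s\n",
                 get_cache_policy(SI, SI_COHERENCY_SHADER) == L2_LRU ?
                 "L2_LRU" : "L2_BYPASS");
          /* CIK and newer keep shader-coherent CP DMA in L2. */
          printf("CIK,  shader:  %s\n",
                 get_cache_policy(CIK, SI_COHERENCY_SHADER) == L2_LRU ?
                 "L2_LRU" : "L2_BYPASS");
          /* CB metadata is read through L2 only on GFX9 and newer. */
          printf("GFX9, CB meta: %s\n",
                 get_cache_policy(GFX9, SI_COHERENCY_CB_META) == L2_LRU ?
                 "L2_LRU" : "L2_BYPASS");
          return 0;
  }

The flip side is visible in get_flush_flags(): when the policy is L2_BYPASS,
shader consumers still need SI_CONTEXT_INV_GLOBAL_L2, which is exactly the
condition the patch substitutes for the old chip_class == SI test.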
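The si_copy_buffer() hunks below only re-plumb the cache policy; the
function's pre-existing three-part structure (unaligned head, aligned main
copy, trailing engine realign) is unchanged. As a refresher on the head/main
arithmetic visible in the context lines, a small standalone sketch, assuming
SI_CPDMA_ALIGNMENT == 32 (its value in si_pipe.h); the realign_size
computation is outside this diff, so it is not modeled:

  /* copy_split_sketch.c — how si_copy_buffer() splits an unaligned copy
   * into a "skipped" head plus an aligned main part (illustration only).
   * Build with: cc -Wall -o split copy_split_sketch.c && ./split */
  #include <stdio.h>

  #define SI_CPDMA_ALIGNMENT 32
  #define MIN2(a, b) ((a) < (b) ? (a) : (b))

  int main(void)
  {
          unsigned src_offset = 0x1005; /* 5 bytes past a 32-byte boundary */
          unsigned size = 1000;
          unsigned skipped_size = 0;

          if (src_offset % SI_CPDMA_ALIGNMENT) {
                  /* Head: bytes to copy until src is aligned again. */
                  skipped_size = SI_CPDMA_ALIGNMENT -
                                 (src_offset % SI_CPDMA_ALIGNMENT);
                  /* The main part is skipped entirely if size is small. */
                  skipped_size = MIN2(skipped_size, size);
                  size -= skipped_size;
          }

          printf("head (unaligned src): %u bytes\n", skipped_size); /* 27 */
          printf("main (aligned src):   %u bytes\n", size);         /* 973 */
          return 0;
  }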
diff --git a/src/gallium/drivers/radeonsi/si_cp_dma.c b/src/gallium/drivers/radeonsi/si_cp_dma.c
index 68090412088..86eb3529d9b 100644
--- a/src/gallium/drivers/radeonsi/si_cp_dma.c
+++ b/src/gallium/drivers/radeonsi/si_cp_dma.c
@@ -32,47 +32,48 @@
 #define CP_DMA_CLEAR_PERF_THRESHOLD (32 * 1024) /* guess (clear is much slower) */
 
 /* Set this if you want the ME to wait until CP DMA is done.
  * It should be set on the last CP DMA packet. */
 #define CP_DMA_SYNC		(1 << 0)
 
 /* Set this if the source data was used as a destination in a previous CP DMA
  * packet. It's for preventing a read-after-write (RAW) hazard between two
  * CP DMA packets. */
 #define CP_DMA_RAW_WAIT		(1 << 1)
-#define CP_DMA_USE_L2		(1 << 2) /* CIK+ */
 #define CP_DMA_CLEAR		(1 << 3)
 #define CP_DMA_PFP_SYNC_ME	(1 << 4)
 
 /* The max number of bytes that can be copied per packet. */
 static inline unsigned cp_dma_max_byte_count(struct si_context *sctx)
 {
 	unsigned max = sctx->chip_class >= GFX9 ?
 			       S_414_BYTE_COUNT_GFX9(~0u) :
 			       S_414_BYTE_COUNT_GFX6(~0u);
 
 	/* make it aligned for optimal performance */
 	return max & ~(SI_CPDMA_ALIGNMENT - 1);
 }
 
 /* Emit a CP DMA packet to do a copy from one buffer to another, or to clear
  * a buffer. The size must fit in bits [20:0]. If CP_DMA_CLEAR is set, src_va is a 32-bit
  * clear value.
  */
 static void si_emit_cp_dma(struct si_context *sctx, uint64_t dst_va,
-			   uint64_t src_va, unsigned size, unsigned flags)
+			   uint64_t src_va, unsigned size, unsigned flags,
+			   enum si_cache_policy cache_policy)
 {
 	struct radeon_cmdbuf *cs = sctx->gfx_cs;
 	uint32_t header = 0, command = 0;
 
 	assert(size <= cp_dma_max_byte_count(sctx));
+	assert(sctx->chip_class != SI || cache_policy == L2_BYPASS);
 
 	if (sctx->chip_class >= GFX9)
 		command |= S_414_BYTE_COUNT_GFX9(size);
 	else
 		command |= S_414_BYTE_COUNT_GFX6(size);
 
 	/* Sync flags. */
 	if (flags & CP_DMA_SYNC)
 		header |= S_411_CP_SYNC(1);
 	else {
@@ -82,26 +83,26 @@ static void si_emit_cp_dma(struct si_context *sctx, uint64_t dst_va,
 			command |= S_414_DISABLE_WR_CONFIRM_GFX6(1);
 	}
 
 	if (flags & CP_DMA_RAW_WAIT)
 		command |= S_414_RAW_WAIT(1);
 
 	/* Src and dst flags. */
 	if (sctx->chip_class >= GFX9 && !(flags & CP_DMA_CLEAR) &&
 	    src_va == dst_va)
 		header |= S_411_DST_SEL(V_411_NOWHERE); /* prefetch only */
-	else if (flags & CP_DMA_USE_L2)
+	else if (sctx->chip_class >= CIK && cache_policy != L2_BYPASS)
 		header |= S_411_DST_SEL(V_411_DST_ADDR_TC_L2);
 
 	if (flags & CP_DMA_CLEAR)
 		header |= S_411_SRC_SEL(V_411_DATA);
-	else if (flags & CP_DMA_USE_L2)
+	else if (sctx->chip_class >= CIK && cache_policy != L2_BYPASS)
 		header |= S_411_SRC_SEL(V_411_SRC_ADDR_TC_L2);
 
 	if (sctx->chip_class >= CIK) {
 		radeon_emit(cs, PKT3(PKT3_DMA_DATA, 5, 0));
 		radeon_emit(cs, header);
 		radeon_emit(cs, src_va);	/* SRC_ADDR_LO [31:0] */
 		radeon_emit(cs, src_va >> 32);	/* SRC_ADDR_HI [31:0] */
 		radeon_emit(cs, dst_va);	/* DST_ADDR_LO [31:0] */
 		radeon_emit(cs, dst_va >> 32);	/* DST_ADDR_HI [31:0] */
 		radeon_emit(cs, command);
@@ -128,45 +129,50 @@ static void si_emit_cp_dma(struct si_context *sctx, uint64_t dst_va,
 	}
 }
 
 void si_cp_dma_wait_for_idle(struct si_context *sctx)
 {
 	/* Issue a dummy DMA that copies zero bytes.
 	 *
 	 * The DMA engine will see that there's no work to do and skip this
 	 * DMA request, however, the CP will see the sync flag and still wait
 	 * for all DMAs to complete.
 	 */
-	si_emit_cp_dma(sctx, 0, 0, 0, CP_DMA_SYNC);
+	si_emit_cp_dma(sctx, 0, 0, 0, CP_DMA_SYNC, L2_BYPASS);
 }
 
-static unsigned get_flush_flags(struct si_context *sctx, enum si_coherency coher)
+static unsigned get_flush_flags(struct si_context *sctx, enum si_coherency coher,
+				enum si_cache_policy cache_policy)
 {
 	switch (coher) {
 	default:
 	case SI_COHERENCY_NONE:
 		return 0;
 	case SI_COHERENCY_SHADER:
+		assert(sctx->chip_class != SI || cache_policy == L2_BYPASS);
 		return SI_CONTEXT_INV_SMEM_L1 |
 		       SI_CONTEXT_INV_VMEM_L1 |
-		       (sctx->chip_class == SI ? SI_CONTEXT_INV_GLOBAL_L2 : 0);
+		       (cache_policy == L2_BYPASS ? SI_CONTEXT_INV_GLOBAL_L2 : 0);
 	case SI_COHERENCY_CB_META:
+		assert(sctx->chip_class >= GFX9 ? cache_policy != L2_BYPASS :
+						  cache_policy == L2_BYPASS);
 		return SI_CONTEXT_FLUSH_AND_INV_CB;
 	}
 }
 
-static unsigned get_tc_l2_flag(struct si_context *sctx, enum si_coherency coher)
+static enum si_cache_policy get_cache_policy(struct si_context *sctx,
+					     enum si_coherency coher)
 {
 	if ((sctx->chip_class >= GFX9 && coher == SI_COHERENCY_CB_META) ||
 	    (sctx->chip_class >= CIK && coher == SI_COHERENCY_SHADER))
-		return CP_DMA_USE_L2;
+		return L2_LRU;
 
-	return 0;
+	return L2_BYPASS;
 }
 
 static void si_cp_dma_prepare(struct si_context *sctx, struct pipe_resource *dst,
 			      struct pipe_resource *src, unsigned byte_count,
 			      uint64_t remaining_size, unsigned user_flags,
 			      enum si_coherency coher, bool *is_first,
 			      unsigned *packet_flags)
 {
 	/* Fast exit for a CPDMA prefetch. */
 	if ((user_flags & SI_CPDMA_SKIP_ALL) == SI_CPDMA_SKIP_ALL) {
@@ -217,22 +223,22 @@ static void si_cp_dma_prepare(struct si_context *sctx, struct pipe_resource *dst
 		*packet_flags |= CP_DMA_PFP_SYNC_ME;
 	}
 }
 
 void si_clear_buffer(struct si_context *sctx, struct pipe_resource *dst,
 		     uint64_t offset, uint64_t size, unsigned value,
 		     enum si_coherency coher, enum si_method xfer)
 {
 	struct radeon_winsys *ws = sctx->ws;
 	struct r600_resource *rdst = r600_resource(dst);
-	unsigned tc_l2_flag = get_tc_l2_flag(sctx, coher);
-	unsigned flush_flags = get_flush_flags(sctx, coher);
+	enum si_cache_policy cache_policy = get_cache_policy(sctx, coher);
+	unsigned flush_flags = get_flush_flags(sctx, coher, cache_policy);
 	uint64_t dma_clear_size;
 	bool is_first = true;
 
 	if (!size)
 		return;
 
 	dma_clear_size = size & ~3ull;
 
 	/* Mark the buffer range of destination as valid (initialized),
 	 * so that transfer_map knows it should wait for the GPU when mapping
@@ -267,33 +273,34 @@ void si_clear_buffer(struct si_context *sctx, struct pipe_resource *dst,
 		offset += dma_clear_size;
 		size -= dma_clear_size;
 
 		/* Flush the caches. */
 		sctx->flags |= SI_CONTEXT_PS_PARTIAL_FLUSH |
 			       SI_CONTEXT_CS_PARTIAL_FLUSH | flush_flags;
 
 		while (dma_clear_size) {
 			unsigned byte_count = MIN2(dma_clear_size,
 						   cp_dma_max_byte_count(sctx));
-			unsigned dma_flags = tc_l2_flag | CP_DMA_CLEAR;
+			unsigned dma_flags = CP_DMA_CLEAR;
 
 			si_cp_dma_prepare(sctx, dst, NULL, byte_count, dma_clear_size, 0,
 					  coher, &is_first, &dma_flags);
 
 			/* Emit the clear packet. */
-			si_emit_cp_dma(sctx, va, value, byte_count, dma_flags);
+			si_emit_cp_dma(sctx, va, value, byte_count, dma_flags,
+				       cache_policy);
 
 			dma_clear_size -= byte_count;
 			va += byte_count;
 		}
 
-		if (tc_l2_flag)
+		if (cache_policy != L2_BYPASS)
 			rdst->TC_L2_dirty = true;
 
 		/* If it's not a framebuffer fast clear... */
 		if (coher == SI_COHERENCY_SHADER)
 			sctx->num_cp_dma_calls++;
 	}
 
 	if (size) {
 		/* Handle non-dword alignment.
 		 *
@@ -367,20 +374,21 @@ static void si_pipe_clear_buffer(struct pipe_context *ctx,
 }
 
 /**
  * Realign the CP DMA engine. This must be done after a copy with an unaligned
  * size.
  *
  * \param size  Remaining size to the CP DMA alignment.
  */
 static void si_cp_dma_realign_engine(struct si_context *sctx, unsigned size,
 				     unsigned user_flags, enum si_coherency coher,
+				     enum si_cache_policy cache_policy,
 				     bool *is_first)
 {
 	uint64_t va;
 	unsigned dma_flags = 0;
 	unsigned scratch_size = SI_CPDMA_ALIGNMENT * 2;
 
 	assert(size < SI_CPDMA_ALIGNMENT);
 
 	/* Use the scratch buffer as the dummy buffer. The 3D engine should be
 	 * idle at this point.
@@ -397,39 +405,39 @@ static void si_cp_dma_realign_engine(struct si_context *sctx, unsigned size,
 			return;
 
 		si_mark_atom_dirty(sctx, &sctx->atoms.s.scratch_state);
 	}
 
 	si_cp_dma_prepare(sctx, &sctx->scratch_buffer->b.b,
 			  &sctx->scratch_buffer->b.b,
 			  size, size, user_flags, coher, is_first, &dma_flags);
 
 	va = sctx->scratch_buffer->gpu_address;
-	si_emit_cp_dma(sctx, va, va + SI_CPDMA_ALIGNMENT, size, dma_flags);
+	si_emit_cp_dma(sctx, va, va + SI_CPDMA_ALIGNMENT, size, dma_flags,
+		       cache_policy);
 }
 
 /**
  * Do memcpy between buffers using CP DMA.
  *
  * \param user_flags	bitmask of SI_CPDMA_*
  */
 void si_copy_buffer(struct si_context *sctx,
 		    struct pipe_resource *dst, struct pipe_resource *src,
 		    uint64_t dst_offset, uint64_t src_offset, unsigned size,
 		    unsigned user_flags)
 {
 	uint64_t main_dst_offset, main_src_offset;
 	unsigned skipped_size = 0;
 	unsigned realign_size = 0;
 	enum si_coherency coher = SI_COHERENCY_SHADER;
-	unsigned tc_l2_flag = get_tc_l2_flag(sctx, coher);
-	unsigned flush_flags = get_flush_flags(sctx, coher);
+	enum si_cache_policy cache_policy = get_cache_policy(sctx, coher);
 	bool is_first = true;
 
 	if (!size)
 		return;
 
 	if (dst != src || dst_offset != src_offset) {
 		/* Mark the buffer range of destination as valid (initialized),
 		 * so that transfer_map knows it should wait for the GPU when mapping
 		 * that range. */
 		util_range_add(&r600_resource(dst)->valid_buffer_range, dst_offset,
@@ -455,63 +463,65 @@ void si_copy_buffer(struct si_context *sctx,
 	 */
 	if (src_offset % SI_CPDMA_ALIGNMENT) {
 		skipped_size = SI_CPDMA_ALIGNMENT - (src_offset % SI_CPDMA_ALIGNMENT);
 		/* The main part will be skipped if the size is too small. */
 		skipped_size = MIN2(skipped_size, size);
 		size -= skipped_size;
 	}
 
 	/* Flush the caches. */
-	if (!(user_flags & SI_CPDMA_SKIP_GFX_SYNC))
+	if (!(user_flags & SI_CPDMA_SKIP_GFX_SYNC)) {
 		sctx->flags |= SI_CONTEXT_PS_PARTIAL_FLUSH |
-			       SI_CONTEXT_CS_PARTIAL_FLUSH | flush_flags;
+			       SI_CONTEXT_CS_PARTIAL_FLUSH |
+			       get_flush_flags(sctx, coher, cache_policy);
+	}
 
 	/* This is the main part doing the copying. Src is always aligned. */
 	main_dst_offset = dst_offset + skipped_size;
 	main_src_offset = src_offset + skipped_size;
 
 	while (size) {
-		unsigned dma_flags = tc_l2_flag;
 		unsigned byte_count = MIN2(size, cp_dma_max_byte_count(sctx));
+		unsigned dma_flags = 0;
 
 		si_cp_dma_prepare(sctx, dst, src, byte_count,
 				  size + skipped_size + realign_size,
 				  user_flags, coher, &is_first, &dma_flags);
 
 		si_emit_cp_dma(sctx, main_dst_offset, main_src_offset,
-			       byte_count, dma_flags);
+			       byte_count, dma_flags, cache_policy);
 
 		size -= byte_count;
 		main_src_offset += byte_count;
 		main_dst_offset += byte_count;
 	}
 
 	/* Copy the part we skipped because src wasn't aligned. */
 	if (skipped_size) {
-		unsigned dma_flags = tc_l2_flag;
+		unsigned dma_flags = 0;
 
 		si_cp_dma_prepare(sctx, dst, src, skipped_size,
 				  skipped_size + realign_size,
 				  user_flags, coher, &is_first, &dma_flags);
 
 		si_emit_cp_dma(sctx, dst_offset, src_offset, skipped_size,
-			       dma_flags);
+			       dma_flags, cache_policy);
 	}
 
 	/* Finally, realign the engine if the size wasn't aligned. */
 	if (realign_size) {
 		si_cp_dma_realign_engine(sctx, realign_size, user_flags, coher,
-					 &is_first);
+					 cache_policy, &is_first);
 	}
 
-	if (tc_l2_flag)
+	if (cache_policy != L2_BYPASS)
 		r600_resource(dst)->TC_L2_dirty = true;
 
 	/* If it's not a prefetch... */
 	if (dst_offset != src_offset)
 		sctx->num_cp_dma_calls++;
 }
 
 void cik_prefetch_TC_L2_async(struct si_context *sctx, struct pipe_resource *buf,
 			      uint64_t offset, unsigned size)
 {
diff --git a/src/gallium/drivers/radeonsi/si_pipe.h b/src/gallium/drivers/radeonsi/si_pipe.h
index 6bf9f2ed6d3..0b398018c4a 100644
--- a/src/gallium/drivers/radeonsi/si_pipe.h
+++ b/src/gallium/drivers/radeonsi/si_pipe.h
@@ -1098,20 +1098,25 @@ void si_init_clear_functions(struct si_context *sctx);
 #define SI_CPDMA_SKIP_SYNC_AFTER	(1 << 1) /* don't wait for DMA after the copy */
 #define SI_CPDMA_SKIP_SYNC_BEFORE	(1 << 2) /* don't wait for DMA before the copy (RAW hazards) */
 #define SI_CPDMA_SKIP_GFX_SYNC		(1 << 3) /* don't flush caches and don't wait for PS/CS */
 #define SI_CPDMA_SKIP_BO_LIST_UPDATE	(1 << 4) /* don't update the BO list */
 #define SI_CPDMA_SKIP_ALL (SI_CPDMA_SKIP_CHECK_CS_SPACE | \
 			   SI_CPDMA_SKIP_SYNC_AFTER | \
 			   SI_CPDMA_SKIP_SYNC_BEFORE | \
 			   SI_CPDMA_SKIP_GFX_SYNC | \
 			   SI_CPDMA_SKIP_BO_LIST_UPDATE)
 
+enum si_cache_policy {
+	L2_BYPASS,
+	L2_LRU, /* same as SLC=0 */
+};
+
 enum si_coherency {
 	SI_COHERENCY_NONE, /* no cache flushes needed */
 	SI_COHERENCY_SHADER,
 	SI_COHERENCY_CB_META,
 };
 
 enum si_method {
 	SI_METHOD_CP_DMA,
 	SI_METHOD_BEST,
 };
-- 
2.17.1