From: Bas Nieuwenhuizen <b...@basnieuwenhuizen.nl>

Add multi-stage counterparts to the single-shader pipeline cache
helpers, so that all of a pipeline's shader variants can be looked up
or inserted with a single cache entry. Entries are also written to the
on-disk cache, so compiled shaders can be collected even when the app
supplies its own pipeline cache.

Signed-off-by: Timothy Arceri <tarc...@itsqueeze.com>
---
 src/amd/vulkan/radv_pipeline_cache.c | 155 +++++++++++++++++++++++++++++++++++
 src/amd/vulkan/radv_private.h        |  14 ++++
 2 files changed, 169 insertions(+)
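
For reviewers, a rough usage sketch of how a caller would be expected to
drive the new pair of functions. build_pipeline_shaders() and
compile_all_stages() are hypothetical stand-ins, not part of this patch;
only the two radv_* cache functions used below are added here.

static void
build_pipeline_shaders(struct radv_device *device,
                       struct radv_pipeline_cache *cache,
                       const unsigned char sha1[20],
                       struct radv_shader_variant **variants)
{
	/* variants must point at MESA_SHADER_STAGES zeroed slots.
	 *
	 * Fast path: a single lookup fills in every stage at once and
	 * takes a reference on each variant it returns.
	 */
	if (radv_create_shader_variants_from_pipeline_cache(device, cache,
	                                                    sha1, variants))
		return;

	const void *codes[MESA_SHADER_STAGES] = {0};
	unsigned code_sizes[MESA_SHADER_STAGES] = {0};

	/* Hypothetical: fills variants/codes/code_sizes for each active
	 * stage, as the pipeline compile code would.
	 */
	compile_all_stages(device, variants, codes, code_sizes);

	/* Cache miss: publish all stages under one key. The entry is
	 * also written to the on-disk cache.
	 */
	radv_pipeline_cache_insert_shaders(device, cache, sha1,
	                                   variants, codes, code_sizes);
}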

diff --git a/src/amd/vulkan/radv_pipeline_cache.c b/src/amd/vulkan/radv_pipeline_cache.c
index fc34e07822..15b159a698 100644
--- a/src/amd/vulkan/radv_pipeline_cache.c
+++ b/src/amd/vulkan/radv_pipeline_cache.c
@@ -210,20 +210,83 @@ radv_create_shader_variant_from_pipeline_cache(struct radv_device *device,
                void *ptr = radv_alloc_shader_memory(device, variant);
                memcpy(ptr, p, entry->code_sizes[0]);
 
                entry->variants[0] = variant;
        }
 
        p_atomic_inc(&entry->variants[0]->ref_count);
        return entry->variants[0];
 }
 
+bool
+radv_create_shader_variants_from_pipeline_cache(struct radv_device *device,
+                                               struct radv_pipeline_cache *cache,
+                                               const unsigned char *sha1,
+                                               struct radv_shader_variant **variants)
+{
+       struct cache_entry *entry;
+       if (cache)
+               entry = radv_pipeline_cache_search(cache, sha1);
+       else
+               entry = radv_pipeline_cache_search(device->mem_cache, sha1);
+
+       if (!entry) {
+               if (!device->physical_device->disk_cache)
+                       return false;
+
+               uint8_t disk_sha1[20];
+               disk_cache_compute_key(device->physical_device->disk_cache,
+                                      sha1, 20, disk_sha1);
+               entry = (struct cache_entry *)
+                       disk_cache_get(device->physical_device->disk_cache,
+                                      disk_sha1, NULL);
+               if (!entry)
+                       return false;
+       }
+
+       char *p = entry->code;
+       for (int i = 0; i < MESA_SHADER_STAGES; ++i) {
+               if (!entry->variants[i] && entry->code_sizes[i]) {
+                       struct radv_shader_variant *variant;
+                       struct cache_entry_variant_info info;
+
+                       variant = calloc(1, sizeof(struct radv_shader_variant));
+                       if (!variant)
+                               return false;
+
+                       memcpy(&info, p, sizeof(struct cache_entry_variant_info));
+                       p += sizeof(struct cache_entry_variant_info);
+
+                       variant->config = info.config;
+                       variant->info = info.variant_info;
+                       variant->rsrc1 = info.rsrc1;
+                       variant->rsrc2 = info.rsrc2;
+                       variant->code_size = entry->code_sizes[i];
+                       variant->ref_count = 1;
+
+                       void *ptr = radv_alloc_shader_memory(device, variant);
+                       memcpy(ptr, p, entry->code_sizes[i]);
+                       p += entry->code_sizes[i];
+
+                       entry->variants[i] = variant;
+               }
+
+       }
+
+       for (int i = 0; i < MESA_SHADER_STAGES; ++i)
+               if (entry->variants[i])
+                       p_atomic_inc(&entry->variants[i]->ref_count);
+
+       memcpy(variants, entry->variants, sizeof(entry->variants));
+       return true;
+}
+
 
 static void
 radv_pipeline_cache_set_entry(struct radv_pipeline_cache *cache,
                              struct cache_entry *entry)
 {
        const uint32_t mask = cache->table_size - 1;
        const uint32_t start = entry->sha1_dw[0];
 
        /* We'll always be able to insert when we get here. */
        assert(cache->kernel_count < cache->table_size / 2);
@@ -353,20 +416,112 @@ radv_pipeline_cache_insert_shader(struct radv_device *device,
        entry->variants[0] = variant;
        p_atomic_inc(&variant->ref_count);
 
        radv_pipeline_cache_add_entry(cache, entry);
 
        cache->modified = true;
        pthread_mutex_unlock(&cache->mutex);
        return variant;
 }
 
+void
+radv_pipeline_cache_insert_shaders(struct radv_device *device,
+                                  struct radv_pipeline_cache *cache,
+                                  const unsigned char *sha1,
+                                  struct radv_shader_variant **variants,
+                                  const void *const *codes,
+                                  const unsigned *code_sizes)
+{
+       if (!cache)
+               cache = device->mem_cache;
+
+       pthread_mutex_lock(&cache->mutex);
+       struct cache_entry *entry = radv_pipeline_cache_search_unlocked(cache, sha1);
+       if (entry) {
+               for (int i = 0; i < MESA_SHADER_STAGES; ++i) {
+                       if (entry->variants[i]) {
+                               radv_shader_variant_destroy(cache->device, variants[i]);
+                               variants[i] = entry->variants[i];
+                       } else {
+                               entry->variants[i] = variants[i];
+                       }
+                       if (variants[i])
+                               p_atomic_inc(&variants[i]->ref_count);
+               }
+               pthread_mutex_unlock(&cache->mutex);
+               return;
+       }
+       size_t size = sizeof(*entry);
+       for (int i = 0; i < MESA_SHADER_STAGES; ++i)
+               if (variants[i])
+                       size += sizeof(struct cache_entry_variant_info) + code_sizes[i];
+
+       entry = vk_alloc(&cache->alloc, size, 8,
+                          VK_SYSTEM_ALLOCATION_SCOPE_CACHE);
+       if (!entry) {
+               pthread_mutex_unlock(&cache->mutex);
+               return;
+       }
+
+       memset(entry, 0, sizeof(*entry));
+       memcpy(entry->sha1, sha1, 20);
+
+       char* p = entry->code;
+       struct cache_entry_variant_info info;
+
+       for (int i = 0; i < MESA_SHADER_STAGES; ++i) {
+               if (!variants[i])
+                       continue;
+
+               entry->code_sizes[i] = code_sizes[i];
+
+               info.config = variants[i]->config;
+               info.variant_info = variants[i]->info;
+               info.rsrc1 = variants[i]->rsrc1;
+               info.rsrc2 = variants[i]->rsrc2;
+               memcpy(p, &info, sizeof(struct cache_entry_variant_info));
+               p += sizeof(struct cache_entry_variant_info);
+
+               memcpy(p, codes[i], code_sizes[i]);
+               p += code_sizes[i];
+       }
+
+       /* Always add cache items to disk. This allows collection of
+        * compiled shaders by third parties such as Steam, even if the
+        * app implements its own pipeline cache.
+        */
+       if (device->physical_device->disk_cache) {
+               uint8_t disk_sha1[20];
+               disk_cache_compute_key(device->physical_device->disk_cache, sha1, 20,
+                              disk_sha1);
+               disk_cache_put(device->physical_device->disk_cache,
+                              disk_sha1, entry, entry_size(entry), NULL);
+       }
+
+       /* We delay setting the variant pointers until after the disk cache
+        * write so that the serialized entries stay reproducible.
+        */
+       for (int i = 0; i < MESA_SHADER_STAGES; ++i) {
+               if (!variants[i])
+                       continue;
+
+               entry->variants[i] = variants[i];
+               p_atomic_inc(&variants[i]->ref_count);
+       }
+
+       radv_pipeline_cache_add_entry(cache, entry);
+
+       cache->modified = true;
+       pthread_mutex_unlock(&cache->mutex);
+       return;
+}
+
 struct cache_header {
        uint32_t header_size;
        uint32_t header_version;
        uint32_t vendor_id;
        uint32_t device_id;
        uint8_t  uuid[VK_UUID_SIZE];
 };
 
 void
 radv_pipeline_cache_load(struct radv_pipeline_cache *cache,
diff --git a/src/amd/vulkan/radv_private.h b/src/amd/vulkan/radv_private.h
index 70c5c70aaa..b0cb8679fd 100644
--- a/src/amd/vulkan/radv_private.h
+++ b/src/amd/vulkan/radv_private.h
@@ -330,20 +330,34 @@ radv_create_shader_variant_from_pipeline_cache(struct radv_device *device,
                                               struct radv_pipeline_cache *cache,
                                               const unsigned char *sha1);
 
 struct radv_shader_variant *
 radv_pipeline_cache_insert_shader(struct radv_device *device,
                                  struct radv_pipeline_cache *cache,
                                  const unsigned char *sha1,
                                  struct radv_shader_variant *variant,
                                  const void *code, unsigned code_size);
 
+bool
+radv_create_shader_variants_from_pipeline_cache(struct radv_device *device,
+                                               struct radv_pipeline_cache *cache,
+                                               const unsigned char *sha1,
+                                               struct radv_shader_variant **variants);
+
+void
+radv_pipeline_cache_insert_shaders(struct radv_device *device,
+                                  struct radv_pipeline_cache *cache,
+                                  const unsigned char *sha1,
+                                  struct radv_shader_variant **variants,
+                                  const void *const *codes,
+                                  const unsigned *code_sizes);
+
 struct radv_meta_state {
        VkAllocationCallbacks alloc;
 
        struct radv_pipeline_cache cache;
 
        /**
         * Use array element `i` for images with `2^i` samples.
         */
        struct {
                VkRenderPass render_pass[NUM_META_FS_KEYS];
-- 
2.13.6
