As the most frequent PTE encoding is for the scratch page, cache that
encoding upon creation of the scratch page rather than recomputing it for
every use.

Signed-off-by: Chris Wilson <ch...@chris-wilson.co.uk>
Cc: Joonas Lahtinen <joonas.lahti...@linux.intel.com>
Cc: Mika Kuoppala <mika.kuopp...@linux.intel.com>
Cc: Matthew Auld <matthew.william.a...@gmail.com>
Reviewed-by: Matthew Auld <matthew.william.a...@gmail.com>
---
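For reviewers wanting a standalone illustration of the pattern being applied
here, below is a minimal userspace sketch (not i915 code): compute the
scratch PTE encoding once when the scratch page is set up, then reuse the
cached value on every clear instead of re-running the encode callback in the
hot path. All names in the sketch (fake_pte_encode, struct toy_ppgtt,
toy_init_scratch, toy_clear_range) are illustrative stand-ins.

	#include <stdint.h>
	#include <stdio.h>

	typedef uint32_t toy_pte_t;

	struct toy_ppgtt {
		uint64_t scratch_daddr;	/* DMA address of the scratch page */
		toy_pte_t scratch_pte;	/* cached encoding, filled in at init */
		toy_pte_t ptes[16];	/* tiny stand-in for a page table */
	};

	/* Stand-in for vm->pte_encode(): pack address and flag bits. */
	static toy_pte_t fake_pte_encode(uint64_t daddr, unsigned int flags)
	{
		return (toy_pte_t)((daddr >> 12) << 4) | flags;
	}

	static void toy_init_scratch(struct toy_ppgtt *ppgtt, uint64_t daddr)
	{
		ppgtt->scratch_daddr = daddr;
		/* Encoded once here ... */
		ppgtt->scratch_pte = fake_pte_encode(daddr, 0x3);
	}

	static void toy_clear_range(struct toy_ppgtt *ppgtt,
				    unsigned int first, unsigned int count)
	{
		/* ... and only read back in the clearing loop. */
		while (count--)
			ppgtt->ptes[first++] = ppgtt->scratch_pte;
	}

	int main(void)
	{
		struct toy_ppgtt ppgtt = {0};

		toy_init_scratch(&ppgtt, 0x1000);
		toy_clear_range(&ppgtt, 0, 16);
		printf("pte[0] = %#x\n", ppgtt.ptes[0]);
		return 0;
	}

The diff below does the same for gen6: the encode callback is invoked once
in gen6_ppgtt_init_scratch() and the cached gen6_pte_t is reused by
gen6_initialize_pt(), gen6_dump_ppgtt() and gen6_ppgtt_clear_range().
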
 drivers/gpu/drm/i915/i915_gem_gtt.c | 20 ++++++++++----------
 drivers/gpu/drm/i915/i915_gem_gtt.h |  1 +
 2 files changed, 11 insertions(+), 10 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 79d63e16c2d4..58fd2ea77d00 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -640,11 +640,10 @@ static void gen8_initialize_pt(struct i915_address_space *vm,
                gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC));
 }
 
-static void gen6_initialize_pt(struct i915_address_space *vm,
+static void gen6_initialize_pt(struct gen6_hw_ppgtt *ppgtt,
                               struct i915_page_table *pt)
 {
-       fill32_px(vm, pt,
-                 vm->pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC, 0));
+       fill32_px(&ppgtt->base.vm, pt, ppgtt->scratch_pte);
 }
 
 static struct i915_page_directory *alloc_pd(struct i915_address_space *vm)
@@ -1631,9 +1630,7 @@ static struct i915_hw_ppgtt *gen8_ppgtt_create(struct drm_i915_private *i915)
 static void gen6_dump_ppgtt(struct i915_hw_ppgtt *base, struct seq_file *m)
 {
        struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(base);
-       struct i915_address_space *vm = &base->vm;
-       const gen6_pte_t scratch_pte =
-               vm->pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC, 0);
+       const gen6_pte_t scratch_pte = ppgtt->scratch_pte;
        struct i915_page_table *pt;
        u32 pte, pde;
 
@@ -1819,8 +1816,7 @@ static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
        unsigned int pde = first_entry / GEN6_PTES;
        unsigned int pte = first_entry % GEN6_PTES;
        unsigned int num_entries = length >> PAGE_SHIFT;
-       const gen6_pte_t scratch_pte =
-               vm->pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC, 0);
+       const gen6_pte_t scratch_pte = ppgtt->scratch_pte;
 
        while (num_entries) {
                struct i915_page_table *pt = ppgtt->base.pd.page_table[pde++];
@@ -1912,7 +1908,7 @@ static int gen6_alloc_va_range(struct i915_address_space *vm,
                                goto unwind_out;
 
                        if (count < GEN6_PTES)
-                               gen6_initialize_pt(vm, pt);
+                               gen6_initialize_pt(ppgtt, pt);
                        ppgtt->base.pd.page_table[pde] = pt;
 
                        if (i915_vma_is_bound(ppgtt->vma,
@@ -1950,13 +1946,17 @@ static int gen6_ppgtt_init_scratch(struct gen6_hw_ppgtt *ppgtt)
        if (ret)
                return ret;
 
+       ppgtt->scratch_pte =
+               vm->pte_encode(vm->scratch_page.daddr,
+                              I915_CACHE_NONE, PTE_READ_ONLY);
+
        vm->scratch_pt = alloc_pt(vm);
        if (IS_ERR(vm->scratch_pt)) {
                cleanup_scratch_page(vm);
                return PTR_ERR(vm->scratch_pt);
        }
 
-       gen6_initialize_pt(vm, vm->scratch_pt);
+       gen6_initialize_pt(ppgtt, vm->scratch_pt);
        gen6_for_all_pdes(unused, &ppgtt->base.pd, pde)
                ppgtt->base.pd.page_table[pde] = vm->scratch_pt;
 
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index dc98830fae69..c50bbde007f8 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -412,6 +412,7 @@ struct gen6_hw_ppgtt {
 
        struct i915_vma *vma;
        gen6_pte_t __iomem *pd_addr;
+       gen6_pte_t scratch_pte;
 
        unsigned int pin_count;
        bool scan_for_unused_pt;
-- 
2.17.1
