As we use a single scratch page for all ppgtt instances, we can
likewise share a single scratch page table and scratch page directory
across all ppgtt instances, saving 2 pages + structs per ppgtt.
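
In short, only the global GTT address space owns the scratch
structures; every ppgtt simply aliases the GGTT copies and never
frees them itself. Condensed view of the setup path added below
(mirrors the new setup_scratch() helper in this patch):

	static int setup_scratch(struct i915_address_space *vm)
	{
		struct i915_address_space *ggtt_vm = &to_i915(vm->dev)->gtt.base;

		if (i915_is_ggtt(vm))
			/* GGTT allocates (and later frees) the real pages */
			return setup_scratch_ggtt(vm);

		/* each ppgtt just borrows the GGTT copies */
		vm->scratch_page = ggtt_vm->scratch_page;
		vm->scratch_pt = ggtt_vm->scratch_pt;
		vm->scratch_pd = ggtt_vm->scratch_pd;

		return 0;
	}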

v2: Rebase

Signed-off-by: Mika Kuoppala <mika.kuopp...@intel.com>
---
 drivers/gpu/drm/i915/i915_gem_gtt.c | 273 +++++++++++++++++++++++-------------
 1 file changed, 178 insertions(+), 95 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 61f4da0..ab113ce 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -430,6 +430,17 @@ fail_bitmap:
        return ERR_PTR(ret);
 }
 
+static void gen6_initialize_pt(struct i915_address_space *vm,
+                              struct i915_page_table *pt)
+{
+       gen6_pte_t scratch_pte;
+
+       scratch_pte = vm->pte_encode(px_dma(vm->scratch_page),
+                                    I915_CACHE_LLC, true, 0);
+
+       fill32_px(vm->dev, pt, scratch_pte);
+}
+
 static void free_pd(struct drm_device *dev, struct i915_page_directory *pd)
 {
        if (px_page(pd)) {
@@ -467,6 +478,156 @@ free_pd:
        return ERR_PTR(ret);
 }
 
+static void gen8_initialize_pd(struct i915_address_space *vm,
+                              struct i915_page_directory *pd)
+{
+       gen8_pde_t scratch_pde;
+
+       scratch_pde = gen8_pde_encode(px_dma(vm->scratch_pt), I915_CACHE_LLC);
+
+       fill_px(vm->dev, pd, scratch_pde);
+}
+
+#define SCRATCH_PAGE_MAGIC 0xffff00ffffff00ffULL
+
+static int alloc_scratch_page(struct i915_address_space *vm)
+{
+       struct i915_page_scratch *sp;
+       int ret;
+
+       WARN_ON(vm->scratch_page);
+
+       sp = kzalloc(sizeof(*sp), GFP_KERNEL);
+       if (sp == NULL)
+               return -ENOMEM;
+
+       ret = __setup_page_dma(vm->dev, px_base(sp), GFP_DMA32 | __GFP_ZERO);
+       if (ret) {
+               kfree(sp);
+               return ret;
+       }
+
+       fill_px(vm->dev, sp, SCRATCH_PAGE_MAGIC);
+       set_pages_uc(px_page(sp), 1);
+
+       vm->scratch_page = sp;
+
+       return 0;
+}
+
+static void free_scratch_page(struct i915_address_space *vm)
+{
+       struct i915_page_scratch *sp = vm->scratch_page;
+
+       set_pages_wb(px_page(sp), 1);
+
+       cleanup_px(vm->dev, sp);
+       kfree(sp);
+
+       vm->scratch_page = NULL;
+}
+
+static int setup_scratch_ggtt(struct i915_address_space *vm)
+{
+       int ret;
+
+       ret = alloc_scratch_page(vm);
+       if (ret)
+               return ret;
+
+       WARN_ON(vm->scratch_pt);
+
+       if (INTEL_INFO(vm->dev)->gen < 6)
+               return 0;
+
+       vm->scratch_pt = alloc_pt(vm->dev);
+       if (IS_ERR(vm->scratch_pt))
+               return PTR_ERR(vm->scratch_pt);
+
+       WARN_ON(px_dma(vm->scratch_page) == 0);
+
+       if (INTEL_INFO(vm->dev)->gen >= 8) {
+               gen8_initialize_pt(vm, vm->scratch_pt);
+
+               WARN_ON(vm->scratch_pd);
+
+               vm->scratch_pd = alloc_pd(vm->dev);
+               if (IS_ERR(vm->scratch_pd)) {
+                       ret = PTR_ERR(vm->scratch_pd);
+                       goto err_pd;
+               }
+
+               WARN_ON(px_dma(vm->scratch_pt) == 0);
+               gen8_initialize_pd(vm, vm->scratch_pd);
+       } else {
+               gen6_initialize_pt(vm, vm->scratch_pt);
+       }
+
+       return 0;
+
+err_pd:
+       free_pt(vm->dev, vm->scratch_pt);
+       return ret;
+}
+
+static int setup_scratch(struct i915_address_space *vm)
+{
+       struct i915_address_space *ggtt_vm = &to_i915(vm->dev)->gtt.base;
+
+       if (i915_is_ggtt(vm))
+               return setup_scratch_ggtt(vm);
+
+       vm->scratch_page = ggtt_vm->scratch_page;
+       vm->scratch_pt = ggtt_vm->scratch_pt;
+       vm->scratch_pd = ggtt_vm->scratch_pd;
+
+       return 0;
+}
+
+static void check_scratch_page(struct i915_address_space *vm)
+{
+       struct i915_hw_ppgtt *ppgtt =
+               container_of(vm, struct i915_hw_ppgtt, base);
+       int i;
+       u64 *vaddr;
+
+       vaddr = kmap_px(vm->scratch_page);
+
+       for (i = 0; i < PAGE_SIZE / sizeof(u64); i++) {
+               if (vaddr[i] == SCRATCH_PAGE_MAGIC)
+                       continue;
+
+               DRM_ERROR("%p scratch[%d] = 0x%08llx\n", vm, i, vaddr[i]);
+               break;
+       }
+
+       kunmap_px(ppgtt, vaddr);
+}
+
+static void cleanup_scratch_ggtt(struct i915_address_space *vm)
+{
+       check_scratch_page(vm);
+       free_scratch_page(vm);
+
+       if (INTEL_INFO(vm->dev)->gen < 6)
+               return;
+
+       free_pt(vm->dev, vm->scratch_pt);
+
+       if (INTEL_INFO(vm->dev)->gen >= 8)
+               free_pd(vm->dev, vm->scratch_pd);
+}
+
+static void cleanup_scratch(struct i915_address_space *vm)
+{
+       if (i915_is_ggtt(vm))
+               cleanup_scratch_ggtt(vm);
+
+       vm->scratch_page = NULL;
+       vm->scratch_pt = NULL;
+       vm->scratch_pd = NULL;
+}
+
 /* Broadwell Page Directory Pointer Descriptors */
 static int gen8_write_pdp(struct intel_engine_cs *ring,
                          unsigned entry,
@@ -521,7 +682,7 @@ static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
        unsigned num_entries = length >> PAGE_SHIFT;
        unsigned last_pte, i;
 
-       scratch_pte = gen8_pte_encode(px_dma(ppgtt->base.scratch_page),
+       scratch_pte = gen8_pte_encode(px_dma(vm->scratch_page),
                                      I915_CACHE_LLC, use_scratch);
 
        while (num_entries) {
@@ -605,16 +766,6 @@ static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
                kunmap_px(ppgtt, pt_vaddr);
 }
 
-static void gen8_initialize_pd(struct i915_address_space *vm,
-                              struct i915_page_directory *pd)
-{
-       gen8_pde_t scratch_pde;
-
-       scratch_pde = gen8_pde_encode(px_dma(vm->scratch_pt), I915_CACHE_LLC);
-
-       fill_px(vm->dev, pd, scratch_pde);
-}
-
 static void gen8_free_page_tables(struct i915_page_directory *pd, struct drm_device *dev)
 {
        int i;
@@ -645,8 +796,7 @@ static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
                free_pd(ppgtt->base.dev, ppgtt->pdp.page_directory[i]);
        }
 
-       free_pd(vm->dev, vm->scratch_pd);
-       free_pt(vm->dev, vm->scratch_pt);
+       cleanup_scratch(vm);
 }
 
 /**
@@ -974,16 +1124,7 @@ static int gen8_preallocate_top_level_pdps(struct i915_hw_ppgtt *ppgtt)
  */
 static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
 {
-       ppgtt->base.scratch_pt = alloc_pt(ppgtt->base.dev);
-       if (IS_ERR(ppgtt->base.scratch_pt))
-               return PTR_ERR(ppgtt->base.scratch_pt);
-
-       ppgtt->base.scratch_pd = alloc_pd(ppgtt->base.dev);
-       if (IS_ERR(ppgtt->base.scratch_pd))
-               return PTR_ERR(ppgtt->base.scratch_pd);
-
-       gen8_initialize_pt(&ppgtt->base, ppgtt->base.scratch_pt);
-       gen8_initialize_pd(&ppgtt->base, ppgtt->base.scratch_pd);
+       int ret;
 
        ppgtt->base.start = 0;
        ppgtt->base.total = 1ULL << 32;
@@ -996,12 +1137,18 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
 
        ppgtt->switch_mm = gen8_mm_switch;
 
+       ret = setup_scratch(&ppgtt->base);
+       if (ret)
+               return ret;
+
        if (hw_wont_flush_pdp_tlbs(ppgtt)) {
                /* Avoid the tlb flush bug by preallocating
                 * whole top level pdp structure so it stays
                 * static even if our va space grows.
                 */
-               return gen8_preallocate_top_level_pdps(ppgtt);
+               ret = gen8_preallocate_top_level_pdps(ppgtt);
+               if (ret)
+                       return ret;
        }
 
        return 0;
@@ -1306,19 +1453,6 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
                kunmap_px(ppgtt, pt_vaddr);
 }
 
-static void gen6_initialize_pt(struct i915_address_space *vm,
-                              struct i915_page_table *pt)
-{
-       gen6_pte_t scratch_pte;
-
-       WARN_ON(px_dma(vm->scratch_page) == 0);
-
-       scratch_pte = vm->pte_encode(px_dma(vm->scratch_page),
-                                    I915_CACHE_LLC, true, 0);
-
-       fill32_px(vm->dev, pt, scratch_pte);
-}
-
 static int gen6_alloc_va_range(struct i915_address_space *vm,
                               uint64_t start_in, uint64_t length_in)
 {
@@ -1423,7 +1557,7 @@ static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
                        free_pt(ppgtt->base.dev, pt);
        }
 
-       free_pt(vm->dev, vm->scratch_pt);
+       cleanup_scratch(vm);
 }
 
 static int gen6_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt)
@@ -1438,11 +1572,10 @@ static int gen6_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt)
         * size. We allocate at the top of the GTT to avoid fragmentation.
         */
        BUG_ON(!drm_mm_initialized(&dev_priv->gtt.base.mm));
-       ppgtt->base.scratch_pt = alloc_pt(ppgtt->base.dev);
-       if (IS_ERR(ppgtt->base.scratch_pt))
-               return PTR_ERR(ppgtt->base.scratch_pt);
 
-       gen6_initialize_pt(&ppgtt->base, ppgtt->base.scratch_pt);
+       ret = setup_scratch(&ppgtt->base);
+       if (ret)
+               return ret;
 
 alloc:
        ret = drm_mm_insert_node_in_range_generic(&dev_priv->gtt.base.mm,
@@ -1473,7 +1606,7 @@ alloc:
        return 0;
 
 err_out:
-       free_pt(ppgtt->base.dev, ppgtt->base.scratch_pt);
+       cleanup_scratch(&ppgtt->base);
        return ret;
 }
 
@@ -1547,10 +1680,7 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
 
 static int __hw_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
-
        ppgtt->base.dev = dev;
-       ppgtt->base.scratch_page = dev_priv->gtt.base.scratch_page;
 
        if (INTEL_INFO(dev)->gen < 8)
                return gen6_ppgtt_init(ppgtt);
@@ -2156,45 +2286,6 @@ void i915_global_gtt_cleanup(struct drm_device *dev)
        vm->cleanup(vm);
 }
 
-#define SCRATCH_PAGE_MAGIC 0xffff00ffffff00ffULL
-
-static int alloc_scratch_page(struct i915_address_space *vm)
-{
-       struct i915_page_scratch *sp;
-       int ret;
-
-       WARN_ON(vm->scratch_page);
-
-       sp = kzalloc(sizeof(*sp), GFP_KERNEL);
-       if (sp == NULL)
-               return -ENOMEM;
-
-       ret = __setup_page_dma(vm->dev, px_base(sp), GFP_DMA32 | __GFP_ZERO);
-       if (ret) {
-               kfree(sp);
-               return ret;
-       }
-
-       fill_px(vm->dev, sp, SCRATCH_PAGE_MAGIC);
-       set_pages_uc(px_page(sp), 1);
-
-       vm->scratch_page = sp;
-
-       return 0;
-}
-
-static void free_scratch_page(struct i915_address_space *vm)
-{
-       struct i915_page_scratch *sp = vm->scratch_page;
-
-       set_pages_wb(px_page(sp), 1);
-
-       cleanup_px(vm->dev, sp);
-       kfree(sp);
-
-       vm->scratch_page = NULL;
-}
-
 static unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
 {
        snb_gmch_ctl >>= SNB_GMCH_GGMS_SHIFT;
@@ -2278,7 +2369,6 @@ static int ggtt_probe_common(struct drm_device *dev,
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        phys_addr_t gtt_phys_addr;
-       int ret;
 
        /* For Modern GENs the PTEs and register space are split in the BAR */
        gtt_phys_addr = pci_resource_start(dev->pdev, 0) +
@@ -2300,14 +2390,7 @@ static int ggtt_probe_common(struct drm_device *dev,
                return -ENOMEM;
        }
 
-       ret = alloc_scratch_page(&dev_priv->gtt.base);
-       if (ret) {
-               DRM_ERROR("Scratch setup failed\n");
-               /* iounmap will also get called at remove, but meh */
-               iounmap(dev_priv->gtt.gsm);
-       }
-
-       return ret;
+       return setup_scratch(&dev_priv->gtt.base);
 }
 
 /* The GGTT and PPGTT need a private PPAT setup in order to handle cacheability
@@ -2479,7 +2562,7 @@ static void gen6_gmch_remove(struct i915_address_space *vm)
        struct i915_gtt *gtt = container_of(vm, struct i915_gtt, base);
 
        iounmap(gtt->gsm);
-       free_scratch_page(vm);
+       cleanup_scratch(vm);
 }
 
 static int i915_gmch_probe(struct drm_device *dev,
-- 
1.9.1
