Like with gen6/7, we can enable bitmap tracking with all the
preallocations to make sure things actually don't blow up.

Signed-off-by: Ben Widawsky <ben@bwidawsk.net>
---
 drivers/gpu/drm/i915/i915_gem_gtt.c | 101 +++++++++++++++++++++++++++++++-----
 drivers/gpu/drm/i915/i915_gem_gtt.h |  12 +++++
 2 files changed, 99 insertions(+), 14 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 02ddac4..3e43875 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -345,8 +345,12 @@ err_out:
 
 static void __free_pd_single(struct i915_pagedir *pd, struct drm_device *dev)
 {
+       WARN(!bitmap_empty(pd->used_pdes, I915_PDES_PER_PD),
+            "Free page directory with %d used pages\n",
+            bitmap_weight(pd->used_pdes, I915_PDES_PER_PD));
        i915_dma_unmap_single(pd, dev);
        __free_page(pd->page);
+       kfree(pd->used_pdes);
        kfree(pd);
 }
 
@@ -359,26 +363,35 @@ static void __free_pd_single(struct i915_pagedir *pd, struct drm_device *dev)
 static struct i915_pagedir *alloc_pd_single(struct drm_device *dev)
 {
        struct i915_pagedir *pd;
-       int ret;
+       int ret = -ENOMEM;
 
        pd = kzalloc(sizeof(*pd), GFP_KERNEL);
        if (!pd)
                return ERR_PTR(-ENOMEM);
 
+       pd->used_pdes = kcalloc(BITS_TO_LONGS(I915_PDES_PER_PD),
+                               sizeof(*pd->used_pdes), GFP_KERNEL);
+       if (!pd->used_pdes)
+               goto free_pd;
+
        pd->page = alloc_page(GFP_KERNEL | __GFP_ZERO);
-       if (!pd->page) {
-               kfree(pd);
-               return ERR_PTR(-ENOMEM);
-       }
+       if (!pd->page)
+               goto free_bitmap;
 
        ret = i915_dma_map_px_single(pd, dev);
-       if (ret) {
-               __free_page(pd->page);
-               kfree(pd);
-               return ERR_PTR(ret);
-       }
+       if (ret)
+               goto free_page;
 
        return pd;
+
+free_page:
+       __free_page(pd->page);
+free_bitmap:
+       kfree(pd->used_pdes);
+free_pd:
+       kfree(pd);
+
+       return ERR_PTR(ret);
 }
 
 /* Broadwell Page Directory Pointer Descriptors */
@@ -568,12 +581,48 @@ static void gen8_teardown_va_range(struct i915_address_space *vm,
        gen8_for_each_pdpe(pd, &ppgtt->pdp, start, length, temp, pdpe) {
                uint64_t pd_len = gen8_clamp_pd(start, length);
                uint64_t pd_start = start;
+
+               /* Page directories might not be present since the macro rounds
+                * down, and up.
+                */
+               if (!pd) {
+                       WARN(test_bit(pdpe, ppgtt->pdp.used_pdpes),
+                            "PDPE %d is not allocated, but is reserved (%p)\n",
+                            pdpe, vm);
+                       continue;
+               } else {
+                       WARN(!test_bit(pdpe, ppgtt->pdp.used_pdpes),
+                            "PDPE %d not reserved, but is allocated (%p)",
+                            pdpe, vm);
+               }
+
                gen8_for_each_pde(pt, pd, pd_start, pd_len, temp, pde) {
-                       free_pt_single(pt, vm->dev);
-                       pd->page_tables[pde] = NULL;
+                       if (!pt) {
+                               WARN(test_bit(pde, pd->used_pdes),
+                                    "PDE %d is not allocated, but is reserved (%p)\n",
+                                    pde, vm);
+                               continue;
+                       } else
+                               WARN(!test_bit(pde, pd->used_pdes),
+                                    "PDE %d not reserved, but is allocated (%p)",
+                                    pde, vm);
+
+                       bitmap_clear(pt->used_ptes,
+                                    gen8_pte_index(pd_start),
+                                    gen8_pte_count(pd_start, pd_len));
+
+                       if (bitmap_empty(pt->used_ptes, GEN8_PTES_PER_PT)) {
+                               free_pt_single(pt, vm->dev);
+                               pd->page_tables[pde] = NULL;
+                               WARN_ON(!test_and_clear_bit(pde, pd->used_pdes));
+                       }
+               }
+
+               if (bitmap_empty(pd->used_pdes, I915_PDES_PER_PD)) {
+                       free_pd_single(pd, vm->dev);
+                       ppgtt->pdp.pagedirs[pdpe] = NULL;
+                       WARN_ON(!test_and_clear_bit(pdpe, ppgtt->pdp.used_pdpes));
                }
-               free_pd_single(pd, vm->dev);
-               ppgtt->pdp.pagedirs[pdpe] = NULL;
        }
 }
 
@@ -619,6 +668,7 @@ unwind_out:
        return -ENOMEM;
 }
 
+/* bitmap of new pagedirs */
 static int gen8_ppgtt_alloc_pagedirs(struct i915_pagedirpo *pdp,
                                     uint64_t start,
                                     uint64_t length,
@@ -634,6 +684,7 @@ static int gen8_ppgtt_alloc_pagedirs(struct i915_pagedirpo *pdp,
        gen8_for_each_pdpe(unused, pdp, start, length, temp, pdpe) {
                BUG_ON(unused);
                pdp->pagedirs[pdpe] = alloc_pd_single(dev);
+
                if (IS_ERR(pdp->pagedirs[pdpe]))
                        goto unwind_out;
        }
@@ -655,10 +706,12 @@ static int gen8_alloc_va_range(struct i915_address_space *vm,
                container_of(vm, struct i915_hw_ppgtt, base);
        struct i915_pagedir *pd;
        const uint64_t orig_start = start;
+       const uint64_t orig_length = length;
        uint64_t temp;
        uint32_t pdpe;
        int ret;
 
+       /* Do the allocations first so we can easily bail out */
        ret = gen8_ppgtt_alloc_pagedirs(&ppgtt->pdp, start, length,
                                        ppgtt->base.dev);
        if (ret)
@@ -671,6 +724,26 @@ static int gen8_alloc_va_range(struct i915_address_space *vm,
                        goto err_out;
        }
 
+       /* Now mark everything we've touched as used. This doesn't allow for
+        * robust error checking, but it makes the code a hell of a lot simpler.
+        */
+       start = orig_start;
+       length = orig_length;
+
+       gen8_for_each_pdpe(pd, &ppgtt->pdp, start, length, temp, pdpe) {
+               struct i915_pagetab *pt;
+               uint64_t pd_len = gen8_clamp_pd(start, length);
+               uint64_t pd_start = start;
+               uint32_t pde;
+               gen8_for_each_pde(pt, &ppgtt->pd, pd_start, pd_len, temp, pde) {
+                       bitmap_set(pd->page_tables[pde]->used_ptes,
+                                  gen8_pte_index(start),
+                                  gen8_pte_count(start, length));
+                       set_bit(pde, pd->used_pdes);
+               }
+               set_bit(pdpe, ppgtt->pdp.used_pdpes);
+       }
+
        return 0;
 
 err_out:
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index 18a0b68..b92b1fb 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -195,11 +195,13 @@ struct i915_pagedir {
                dma_addr_t daddr;
        };
 
+       unsigned long *used_pdes;
        struct i915_pagetab *page_tables[I915_PDES_PER_PD];
 };
 
 struct i915_pagedirpo {
        /* struct page *page; */
+       DECLARE_BITMAP(used_pdpes, GEN8_LEGACY_PDPES);
        struct i915_pagedir *pagedirs[GEN8_LEGACY_PDPES];
 };
 
@@ -462,6 +464,16 @@ static inline uint32_t gen8_pml4e_index(uint64_t address)
        BUG();
 }
 
+static inline size_t gen8_pte_count(uint64_t addr, uint64_t length)
+{
+       return i915_pte_count(addr, length, GEN8_PDE_SHIFT);
+}
+
+static inline size_t gen8_pde_count(uint64_t addr, uint64_t length)
+{
+       return i915_pde_count(addr, length, GEN8_PDE_SHIFT);
+}
+
 int i915_gem_gtt_init(struct drm_device *dev);
 void i915_gem_init_global_gtt(struct drm_device *dev);
 void i915_gem_setup_global_gtt(struct drm_device *dev, unsigned long start,
-- 
2.0.4

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
http://lists.freedesktop.org/mailman/listinfo/intel-gfx

Reply via email to