Wrap the active tracking for GPU references in a slab cache for faster
allocations and, hopefully, better fragmentation reduction.

v3: Nothing device-specific left; it's just a slab cache that we can
make global.
v4: Include i915_active.h and don't put the init function under DEBUG_GEM

Signed-off-by: Chris Wilson <ch...@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/i915_active.c | 31 +++++++++++++++++++++++++++---
 drivers/gpu/drm/i915/i915_active.h |  3 +++
 drivers/gpu/drm/i915/i915_pci.c    |  4 ++++
 3 files changed, 35 insertions(+), 3 deletions(-)
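
For reference, the allocation pattern the hunks below switch to looks
roughly like this minimal, stand-alone sketch of the kmem_cache API
(illustration only; the demo_* names are hypothetical and not part of
the patch):

        #include <linux/module.h>
        #include <linux/slab.h>
        #include <linux/types.h>

        struct demo_node {
                u64 timeline;
        };

        static struct kmem_cache *demo_slab;

        static int __init demo_init(void)
        {
                struct demo_node *node;

                /* One cache shared by every instance of the fixed-size node. */
                demo_slab = KMEM_CACHE(demo_node, SLAB_HWCACHE_ALIGN);
                if (!demo_slab)
                        return -ENOMEM;

                /* Nodes come from the dedicated cache instead of kmalloc(). */
                node = kmem_cache_alloc(demo_slab, GFP_KERNEL);
                if (!node) {
                        kmem_cache_destroy(demo_slab);
                        return -ENOMEM;
                }
                node->timeline = 0;

                kmem_cache_free(demo_slab, node);
                return 0;
        }

        static void __exit demo_exit(void)
        {
                kmem_cache_destroy(demo_slab);
        }

        module_init(demo_init);
        module_exit(demo_exit);
        MODULE_LICENSE("GPL");

KMEM_CACHE() sizes and names the cache from the struct definition, so all
allocations of that node type share one pool, which is what the hunks below
convert the kmalloc()/kfree() calls over to.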

diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c
index b1fefe98f9a6..64661c41532b 100644
--- a/drivers/gpu/drm/i915/i915_active.c
+++ b/drivers/gpu/drm/i915/i915_active.c
@@ -9,6 +9,17 @@
 
 #define BKL(ref) (&(ref)->i915->drm.struct_mutex)
 
+/*
+ * Active refs memory management
+ *
+ * To be more economical with memory, we reap all the i915_active trees as
+ * they idle (when we know the active requests are inactive) and allocate the
+ * nodes from a local slab cache to hopefully reduce the fragmentation.
+ */
+static struct i915_global_active {
+       struct kmem_cache *slab_cache;
+} global;
+
 struct active_node {
        struct i915_gem_active base;
        struct i915_active *ref;
@@ -23,7 +34,7 @@ __active_park(struct i915_active *ref)
 
        rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
                GEM_BUG_ON(i915_gem_active_isset(&it->base));
-               kfree(it);
+               kmem_cache_free(global.slab_cache, it);
        }
        ref->tree = RB_ROOT;
 }
@@ -96,11 +107,11 @@ active_instance(struct i915_active *ref, u64 idx)
                        p = &parent->rb_left;
        }
 
-       node = kmalloc(sizeof(*node), GFP_KERNEL);
+       node = kmem_cache_alloc(global.slab_cache, GFP_KERNEL);
 
        /* kmalloc may retire the ref->last (thanks shrinker)! */
        if (unlikely(!i915_gem_active_raw(&ref->last, BKL(ref)))) {
-               kfree(node);
+               kmem_cache_free(global.slab_cache, node);
                goto out;
        }
 
@@ -239,3 +250,17 @@ void i915_active_fini(struct i915_active *ref)
 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
 #include "selftests/i915_active.c"
 #endif
+
+int __init i915_global_active_init(void)
+{
+       global.slab_cache = KMEM_CACHE(active_node, SLAB_HWCACHE_ALIGN);
+       if (!global.slab_cache)
+               return -ENOMEM;
+
+       return 0;
+}
+
+void __exit i915_global_active_exit(void)
+{
+       kmem_cache_destroy(global.slab_cache);
+}
diff --git a/drivers/gpu/drm/i915/i915_active.h b/drivers/gpu/drm/i915/i915_active.h
index ec4b66efd9a7..179b47aeec33 100644
--- a/drivers/gpu/drm/i915/i915_active.h
+++ b/drivers/gpu/drm/i915/i915_active.h
@@ -70,4 +70,7 @@ void i915_active_fini(struct i915_active *ref);
 static inline void i915_active_fini(struct i915_active *ref) { }
 #endif
 
+int i915_global_active_init(void);
+void i915_global_active_exit(void);
+
 #endif /* _I915_ACTIVE_H_ */
diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
index 5d05572c9ff4..852b6b4e8ed8 100644
--- a/drivers/gpu/drm/i915/i915_pci.c
+++ b/drivers/gpu/drm/i915/i915_pci.c
@@ -28,6 +28,7 @@
 
 #include <drm/drm_drv.h>
 
+#include "i915_active.h"
 #include "i915_drv.h"
 #include "i915_selftest.h"
 
@@ -800,6 +801,8 @@ static int __init i915_init(void)
        bool use_kms = true;
        int err;
 
+       i915_global_active_init();
+
        err = i915_mock_selftests();
        if (err)
                return err > 0 ? 0 : err;
@@ -831,6 +834,7 @@ static void __exit i915_exit(void)
                return;
 
        pci_unregister_driver(&i915_pci_driver);
+       i915_global_active_exit();
 }
 
 module_init(i915_init);
-- 
2.20.1
