On Tue, Jan 08, 2013 at 10:53:11AM +0000, Chris Wilson wrote:
> Clients like i915 need to segregate cache domains within the GTT, which
> can lead to small amounts of fragmentation. By allocating the uncached
> buffers from the bottom and the cacheable buffers from the top, we can
> reduce the amount of wasted space and also optimize allocation of the
> mappable portion of the GTT to only those buffers that require CPU
> access through the GTT.
> 
> Signed-off-by: Chris Wilson <ch...@chris-wilson.co.uk>

Two things:
- best_match is dead code and can be garbage-collected. Imo better to do
  that before adding new flags and special modes.
- I'm somewhat freaked out by the complexity of this all, and I do wonder
  whether we're past the point where forking drm_mm.c for i915.ko is the
  right thing. We're adding (and have added already) tons of special
  cases where we're the only user ...

I'll punt for now ;-)
-Daniel
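
For context, here is a rough, hypothetical sketch of how a caller might combine the new search and creation flags to place cacheable buffers at the top of the GTT and uncached ones at the bottom. The helper name and the eviction handling are made up for illustration; only the drm_mm_* calls and flag names come from the patch quoted below.

/*
 * Hypothetical usage sketch (not part of the patch): allocate a GTT node
 * for a cacheable object top-down, or an uncached one bottom-up, using
 * the flags introduced below. Eviction on -ENOSPC is left to the caller.
 */
static struct drm_mm_node *
sketch_alloc_gtt_node(struct drm_mm *mm, unsigned long size,
		      unsigned alignment, unsigned long color, bool cacheable)
{
	unsigned search_flags = cacheable ? DRM_MM_SEARCH_BELOW : 0;
	unsigned create_flags = cacheable ? DRM_MM_CREATE_TOP : 0;
	struct drm_mm_node *hole;

	/* Scan the hole list from the top (or bottom) of the address space. */
	hole = drm_mm_search_free_generic(mm, size, alignment, color,
					  search_flags);
	if (hole == NULL)
		return NULL;	/* caller would evict something and retry */

	/* Carve the block out of the matching end of the chosen hole. */
	return drm_mm_get_block_generic(hole, size, alignment, color,
					create_flags);
}

Note that DRM_MM_SEARCH_BELOW only changes the order in which holes are visited, while DRM_MM_CREATE_TOP decides which end of the chosen hole the node is carved from.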

> ---
>  drivers/gpu/drm/drm_mm.c        |   73 +++++++++++++++++++++++++--------------
>  drivers/gpu/drm/i915/i915_gem.c |    4 +--
>  include/drm/drm_mm.h            |   42 ++++++++++++++--------
>  3 files changed, 77 insertions(+), 42 deletions(-)
> 
> diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
> index b751b8e..59b21ec 100644
> --- a/drivers/gpu/drm/drm_mm.c
> +++ b/drivers/gpu/drm/drm_mm.c
> @@ -49,7 +49,7 @@
>  
>  #define MM_UNUSED_TARGET 4
>  
> -static struct drm_mm_node *drm_mm_kmalloc(struct drm_mm *mm, int atomic)
> +static struct drm_mm_node *drm_mm_kmalloc(struct drm_mm *mm, bool atomic)
>  {
>       struct drm_mm_node *child;
>  
> @@ -105,7 +105,8 @@ EXPORT_SYMBOL(drm_mm_pre_get);
>  static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
>                                struct drm_mm_node *node,
>                                unsigned long size, unsigned alignment,
> -                              unsigned long color)
> +                              unsigned long color,
> +                              unsigned flags)
>  {
>       struct drm_mm *mm = hole_node->mm;
>       unsigned long hole_start = drm_mm_hole_node_start(hole_node);
> @@ -118,12 +119,22 @@ static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
>       if (mm->color_adjust)
>               mm->color_adjust(hole_node, color, &adj_start, &adj_end);
>  
> +     if (flags & DRM_MM_CREATE_TOP)
> +             adj_start = adj_end - size;
> +
>       if (alignment) {
>               unsigned tmp = adj_start % alignment;
> -             if (tmp)
> -                     adj_start += alignment - tmp;
> +             if (tmp) {
> +                     if (flags & DRM_MM_CREATE_TOP)
> +                             adj_start -= tmp;
> +                     else
> +                             adj_start += alignment - tmp;
> +             }
>       }
>  
> +     BUG_ON(adj_start < hole_start);
> +     BUG_ON(adj_end > hole_end);
> +
>       if (adj_start == hole_start) {
>               hole_node->hole_follows = 0;
>               list_del(&hole_node->hole_stack);
> @@ -150,7 +161,7 @@ static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
>  struct drm_mm_node *drm_mm_create_block(struct drm_mm *mm,
>                                       unsigned long start,
>                                       unsigned long size,
> -                                     bool atomic)
> +                                     unsigned flags)
>  {
>       struct drm_mm_node *hole, *node;
>       unsigned long end = start + size;
> @@ -161,7 +172,7 @@ struct drm_mm_node *drm_mm_create_block(struct drm_mm *mm,
>               if (hole_start > start || hole_end < end)
>                       continue;
>  
> -             node = drm_mm_kmalloc(mm, atomic);
> +             node = drm_mm_kmalloc(mm, flags & DRM_MM_CREATE_ATOMIC);
>               if (unlikely(node == NULL))
>                       return NULL;
>  
> @@ -196,15 +207,15 @@ struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *hole_node,
>                                            unsigned long size,
>                                            unsigned alignment,
>                                            unsigned long color,
> -                                          int atomic)
> +                                          unsigned flags)
>  {
>       struct drm_mm_node *node;
>  
> -     node = drm_mm_kmalloc(hole_node->mm, atomic);
> +     node = drm_mm_kmalloc(hole_node->mm, flags & DRM_MM_CREATE_ATOMIC);
>       if (unlikely(node == NULL))
>               return NULL;
>  
> -     drm_mm_insert_helper(hole_node, node, size, alignment, color);
> +     drm_mm_insert_helper(hole_node, node, size, alignment, color, flags);
>  
>       return node;
>  }
> @@ -220,11 +231,11 @@ int drm_mm_insert_node(struct drm_mm *mm, struct drm_mm_node *node,
>  {
>       struct drm_mm_node *hole_node;
>  
> -     hole_node = drm_mm_search_free(mm, size, alignment, false);
> +     hole_node = drm_mm_search_free(mm, size, alignment, 0);
>       if (!hole_node)
>               return -ENOSPC;
>  
> -     drm_mm_insert_helper(hole_node, node, size, alignment, 0);
> +     drm_mm_insert_helper(hole_node, node, size, alignment, 0, 0);
>  
>       return 0;
>  }
> @@ -234,7 +245,8 @@ static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
>                                      struct drm_mm_node *node,
>                                      unsigned long size, unsigned alignment,
>                                      unsigned long color,
> -                                    unsigned long start, unsigned long end)
> +                                    unsigned long start, unsigned long end,
> +                                    unsigned flags)
>  {
>       struct drm_mm *mm = hole_node->mm;
>       unsigned long hole_start = drm_mm_hole_node_start(hole_node);
> @@ -249,11 +261,20 @@ static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
>  
>       if (adj_start < start)
>               adj_start = start;
> +     if (adj_end > end)
> +             adj_end = end;
> +
> +     if (flags & DRM_MM_CREATE_TOP)
> +             adj_start = adj_end - size;
>  
>       if (alignment) {
>               unsigned tmp = adj_start % alignment;
> -             if (tmp)
> -                     adj_start += alignment - tmp;
> +             if (tmp) {
> +                     if (flags & DRM_MM_CREATE_TOP)
> +                             adj_start -= tmp;
> +                     else
> +                             adj_start += alignment - tmp;
> +             }
>       }
>  
>       if (adj_start == hole_start) {
> @@ -270,6 +291,8 @@ static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
>       INIT_LIST_HEAD(&node->hole_stack);
>       list_add(&node->node_list, &hole_node->node_list);
>  
> +     BUG_ON(node->start < start);
> +     BUG_ON(node->start < adj_start);
>       BUG_ON(node->start + node->size > adj_end);
>       BUG_ON(node->start + node->size > end);
>  
> @@ -286,16 +309,16 @@ struct drm_mm_node *drm_mm_get_block_range_generic(struct drm_mm_node *hole_node
>                                               unsigned long color,
>                                               unsigned long start,
>                                               unsigned long end,
> -                                             int atomic)
> +                                             unsigned flags)
>  {
>       struct drm_mm_node *node;
>  
> -     node = drm_mm_kmalloc(hole_node->mm, atomic);
> +     node = drm_mm_kmalloc(hole_node->mm, flags & DRM_MM_CREATE_ATOMIC);
>       if (unlikely(node == NULL))
>               return NULL;
>  
>       drm_mm_insert_helper_range(hole_node, node, size, alignment, color,
> -                                start, end);
> +                                start, end, flags);
>  
>       return node;
>  }
> @@ -313,12 +336,12 @@ int drm_mm_insert_node_in_range(struct drm_mm *mm, struct drm_mm_node *node,
>       struct drm_mm_node *hole_node;
>  
>       hole_node = drm_mm_search_free_in_range(mm, size, alignment,
> -                                             start, end, false);
> +                                             start, end, 0);
>       if (!hole_node)
>               return -ENOSPC;
>  
>       drm_mm_insert_helper_range(hole_node, node, size, alignment, 0,
> -                                start, end);
> +                                start, end, 0);
>  
>       return 0;
>  }
> @@ -399,7 +422,7 @@ struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
>                                              unsigned long size,
>                                              unsigned alignment,
>                                              unsigned long color,
> -                                            bool best_match)
> +                                            unsigned flags)
>  {
>       struct drm_mm_node *entry;
>       struct drm_mm_node *best;
> @@ -412,7 +435,7 @@ struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
>       best = NULL;
>       best_size = ~0UL;
>  
> -     drm_mm_for_each_hole(entry, mm, adj_start, adj_end) {
> +     __drm_mm_for_each_hole(entry, mm, adj_start, adj_end, flags & DRM_MM_SEARCH_BELOW) {
>               if (mm->color_adjust) {
>                       mm->color_adjust(entry, color, &adj_start, &adj_end);
>                       if (adj_end <= adj_start)
> @@ -422,7 +445,7 @@ struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
>               if (!check_free_hole(adj_start, adj_end, size, alignment))
>                       continue;
>  
> -             if (!best_match)
> +             if ((flags & DRM_MM_SEARCH_BEST) == 0)
>                       return entry;
>  
>               if (entry->size < best_size) {
> @@ -441,7 +464,7 @@ struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
>                                                       unsigned long color,
>                                                       unsigned long start,
>                                                       unsigned long end,
> -                                                     bool best_match)
> +                                                     unsigned flags)
>  {
>       struct drm_mm_node *entry;
>       struct drm_mm_node *best;
> @@ -454,7 +477,7 @@ struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
>       best = NULL;
>       best_size = ~0UL;
>  
> -     drm_mm_for_each_hole(entry, mm, adj_start, adj_end) {
> +     __drm_mm_for_each_hole(entry, mm, adj_start, adj_end, flags & DRM_MM_SEARCH_BELOW) {
>               if (adj_start < start)
>                       adj_start = start;
>               if (adj_end > end)
> @@ -469,7 +492,7 @@ struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
>               if (!check_free_hole(adj_start, adj_end, size, alignment))
>                       continue;
>  
> -             if (!best_match)
> +             if ((flags & DRM_MM_SEARCH_BEST) == 0)
>                       return entry;
>  
>               if (entry->size < best_size) {
> diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
> index 89a1f03..8728ca2 100644
> --- a/drivers/gpu/drm/i915/i915_gem.c
> +++ b/drivers/gpu/drm/i915/i915_gem.c
> @@ -2972,12 +2972,12 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
>                               drm_mm_get_block_range_generic(free_space,
>                                                              size, alignment, obj->cache_level,
>                                                              0, dev_priv->mm.gtt_mappable_end,
> -                                                            false);
> +                                                            0);
>               else
>                       free_space =
>                               drm_mm_get_block_generic(free_space,
>                                                        size, alignment, 
>                                                        size, alignment, obj->cache_level,
> +                                                      0);
>       }
>       if (free_space == NULL) {
>               ret = i915_gem_evict_something(dev, size, alignment,
> diff --git a/include/drm/drm_mm.h b/include/drm/drm_mm.h
> index cd45365..1f3f5f1 100644
> --- a/include/drm/drm_mm.h
> +++ b/include/drm/drm_mm.h
> @@ -135,18 +135,26 @@ static inline unsigned long drm_mm_hole_node_end(struct drm_mm_node *hole_node)
>            1 : 0; \
>            entry = list_entry(entry->hole_stack.next, struct drm_mm_node, hole_stack))
>  
> +#define __drm_mm_for_each_hole(entry, mm, hole_start, hole_end, backwards) \
> +     for (entry = list_entry((backwards) ? (mm)->hole_stack.prev : (mm)->hole_stack.next, struct drm_mm_node, hole_stack); \
> +          &entry->hole_stack != &(mm)->hole_stack ? \
> +          hole_start = drm_mm_hole_node_start(entry), \
> +          hole_end = drm_mm_hole_node_end(entry), \
> +          1 : 0; \
> +          entry = list_entry((backwards) ? entry->hole_stack.prev : entry->hole_stack.next, struct drm_mm_node, hole_stack))
> +
>  /*
>   * Basic range manager support (drm_mm.c)
>   */
>  extern struct drm_mm_node *drm_mm_create_block(struct drm_mm *mm,
>                                              unsigned long start,
>                                              unsigned long size,
> -                                            bool atomic);
> +                                            unsigned flags);
>  extern struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *node,
>                                                   unsigned long size,
>                                                   unsigned alignment,
>                                                   unsigned long color,
> -                                                 int atomic);
> +                                                 unsigned flags);
>  extern struct drm_mm_node *drm_mm_get_block_range_generic(
>                                               struct drm_mm_node *node,
>                                               unsigned long size,
> @@ -154,7 +162,9 @@ extern struct drm_mm_node *drm_mm_get_block_range_generic(
>                                               unsigned long color,
>                                               unsigned long start,
>                                               unsigned long end,
> -                                             int atomic);
> +                                             unsigned flags);
> +#define DRM_MM_CREATE_ATOMIC 0x1
> +#define DRM_MM_CREATE_TOP    0x2
>  static inline struct drm_mm_node *drm_mm_get_block(struct drm_mm_node *parent,
>                                                  unsigned long size,
>                                                  unsigned alignment)
> @@ -165,7 +175,7 @@ static inline struct drm_mm_node *drm_mm_get_block_atomic(struct drm_mm_node *pa
>                                                         unsigned long size,
>                                                         unsigned alignment)
>  {
> -     return drm_mm_get_block_generic(parent, size, alignment, 0, 1);
> +     return drm_mm_get_block_generic(parent, size, alignment, 0, DRM_MM_CREATE_ATOMIC);
>  }
>  static inline struct drm_mm_node *drm_mm_get_block_range(
>                                               struct drm_mm_node *parent,
> @@ -196,7 +206,7 @@ static inline struct drm_mm_node *drm_mm_get_block_atomic_range(
>                                               unsigned long end)
>  {
>       return drm_mm_get_block_range_generic(parent, size, alignment, 0,
> -                                             start, end, 1);
> +                                             start, end, DRM_MM_CREATE_ATOMIC);
>  }
>  extern int drm_mm_insert_node(struct drm_mm *mm, struct drm_mm_node *node,
>                             unsigned long size, unsigned alignment);
> @@ -211,7 +221,7 @@ extern struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
>                                                     unsigned long size,
>                                                     unsigned alignment,
>                                                     unsigned long color,
> -                                                   bool best_match);
> +                                                   unsigned flags);
>  extern struct drm_mm_node *drm_mm_search_free_in_range_generic(
>                                               const struct drm_mm *mm,
>                                               unsigned long size,
> @@ -219,13 +229,15 @@ extern struct drm_mm_node *drm_mm_search_free_in_range_generic(
>                                               unsigned long color,
>                                               unsigned long start,
>                                               unsigned long end,
> -                                             bool best_match);
> +                                             unsigned flags);
> +#define DRM_MM_SEARCH_BEST   0x1
> +#define DRM_MM_SEARCH_BELOW  0x2
>  static inline struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
>                                                    unsigned long size,
>                                                    unsigned alignment,
> -                                                  bool best_match)
> +                                                  unsigned flags)
>  {
> -     return drm_mm_search_free_generic(mm,size, alignment, 0, best_match);
> +     return drm_mm_search_free_generic(mm,size, alignment, 0, flags);
>  }
>  static inline  struct drm_mm_node *drm_mm_search_free_in_range(
>                                               const struct drm_mm *mm,
> @@ -233,18 +245,18 @@ static inline  struct drm_mm_node *drm_mm_search_free_in_range(
>                                               unsigned alignment,
>                                               unsigned long start,
>                                               unsigned long end,
> -                                             bool best_match)
> +                                             unsigned flags)
>  {
>       return drm_mm_search_free_in_range_generic(mm, size, alignment, 0,
> -                                                start, end, best_match);
> +                                                start, end, flags);
>  }
>  static inline struct drm_mm_node *drm_mm_search_free_color(const struct drm_mm *mm,
>                                                          unsigned long size,
>                                                          unsigned alignment,
>                                                          unsigned long color,
> -                                                        bool best_match)
> +                                                        unsigned flags)
>  {
> -     return drm_mm_search_free_generic(mm,size, alignment, color, best_match);
> +     return drm_mm_search_free_generic(mm,size, alignment, color, flags);
>  }
>  static inline  struct drm_mm_node *drm_mm_search_free_in_range_color(
>                                               const struct drm_mm *mm,
> @@ -253,10 +265,10 @@ static inline  struct drm_mm_node *drm_mm_search_free_in_range_color(
>                                               unsigned long color,
>                                               unsigned long start,
>                                               unsigned long end,
> -                                             bool best_match)
> +                                             unsigned flags)
>  {
>       return drm_mm_search_free_in_range_generic(mm, size, alignment, color,
> -                                                start, end, best_match);
> +                                                start, end, flags);
>  }
>  extern int drm_mm_init(struct drm_mm *mm,
>                      unsigned long start,
> -- 
> 1.7.10.4
> 
> _______________________________________________
> Intel-gfx mailing list
> Intel-gfx@lists.freedesktop.org
> http://lists.freedesktop.org/mailman/listinfo/intel-gfx

-- 
Daniel Vetter
Software Engineer, Intel Corporation
+41 (0) 79 365 57 48 - http://blog.ffwll.ch
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
http://lists.freedesktop.org/mailman/listinfo/intel-gfx
