On Wed, Dec 28, 2016 at 02:48:52PM +0100, Daniel Vetter wrote:
> On Thu, Dec 22, 2016 at 08:36:40AM +0000, Chris Wilson wrote:
> > The drm_mm range manager claimed to support top-down insertion, but it
> > was neither searching for the top-most hole that could fit the
> > allocation request nor fitting the request to the hole correctly.
> > 
> > In order to search the range efficiently, we create a secondary index
> > for the holes using either their size or their address. This index
> > allows us to find the smallest hole or the hole at the bottom or top of
> > the range efficiently, whilst keeping the hole stack to rapidly service
> > evictions.
> > 
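For readers following along: on the driver side this collapses the old
sflags/aflags pair into a single mode argument. A minimal sketch (the
"topdown" condition is made up here; the function and modes are the ones
from the patch below):

	err = drm_mm_insert_node_in_range(&mm, &node,
					  size, alignment, color,
					  start, end,
					  topdown ? DRM_MM_INSERT_HIGH
						  : DRM_MM_INSERT_BEST);
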
> > v2: Search for holes both high and low. Rename flags to mode.
> > v3: Discover rb_entry_safe() and use it!
> > 
> > Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
> > Reviewed-by: Joonas Lahtinen <joonas.lahtinen at linux.intel.com>
> 
> This feels like a lot to land without driver maintainer acks. Can you
> please resend with them on cc (plus the gcc appeasement) so we can go ack
> fishing a bit?
> 
> Also, this is probably easier to get in after the backmerging is done.
> Everything else besides these last 2 patches and the 2 i915-only patches
> is now in drm-misc.

While I'm asking for resends: kerneldoc for the new DRM_MM_INSERT_* enum
values would be really great. I think the cleaned-up logic in here, with a
dedicated search list for each mode, makes a lot more sense, and it's worth
documenting that a bit more.
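Something roughly like this as a strawman (the wording is mine; the
behaviour is what I read out of first_hole()/next_hole() below):

	/**
	 * enum drm_mm_insert_mode - control search and allocation behaviour
	 *
	 * DRM_MM_INSERT_BEST: search the size-sorted hole rbtree for the
	 * smallest hole that fits, to minimise wasted space.
	 * DRM_MM_INSERT_LOW: walk the address-sorted hole rbtree upwards
	 * from the start of the range and place the node at the lowest
	 * address that fits.
	 * DRM_MM_INSERT_HIGH: walk the address-sorted hole rbtree downwards
	 * from the end of the range and place the node at the highest
	 * address that fits, i.e. actual top-down allocation.
	 * DRM_MM_INSERT_EVICT: try the holes on the hole stack in LIFO
	 * order, so that a hole just created by an eviction scan is reused
	 * first.
	 */
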
-Daniel

> 
> Thanks, Daniel
> 
> > ---
> >  drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c  |  16 +-
> >  drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c |  20 +-
> >  drivers/gpu/drm/armada/armada_gem.c          |   4 +-
> >  drivers/gpu/drm/drm_mm.c                     | 511 +++++++++++++++------------
> >  drivers/gpu/drm/drm_vma_manager.c            |   3 +-
> >  drivers/gpu/drm/etnaviv/etnaviv_mmu.c        |   8 +-
> >  drivers/gpu/drm/i915/i915_gem.c              |  10 +-
> >  drivers/gpu/drm/i915/i915_gem_evict.c        |   9 +-
> >  drivers/gpu/drm/i915/i915_gem_execbuffer.c   |   5 +-
> >  drivers/gpu/drm/i915/i915_gem_gtt.c          |  39 +-
> >  drivers/gpu/drm/i915/i915_gem_stolen.c       |   6 +-
> >  drivers/gpu/drm/msm/msm_gem.c                |   3 +-
> >  drivers/gpu/drm/msm/msm_gem_vma.c            |   3 +-
> >  drivers/gpu/drm/selftests/test-drm_mm.c      |  58 ++-
> >  drivers/gpu/drm/sis/sis_mm.c                 |   6 +-
> >  drivers/gpu/drm/tegra/gem.c                  |   4 +-
> >  drivers/gpu/drm/ttm/ttm_bo_manager.c         |  18 +-
> >  drivers/gpu/drm/vc4/vc4_crtc.c               |   2 +-
> >  drivers/gpu/drm/vc4/vc4_hvs.c                |   3 +-
> >  drivers/gpu/drm/vc4/vc4_plane.c              |   6 +-
> >  drivers/gpu/drm/via/via_mm.c                 |   4 +-
> >  drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c       |  10 +-
> >  include/drm/drm_mm.h                         | 135 +++----
> >  23 files changed, 434 insertions(+), 449 deletions(-)
> > 
> > diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
> > index 00f46b0e076d..d841fcb2e709 100644
> > --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
> > +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
> > @@ -97,8 +97,7 @@ int amdgpu_gtt_mgr_alloc(struct ttm_mem_type_manager *man,
> >  {
> >     struct amdgpu_gtt_mgr *mgr = man->priv;
> >     struct drm_mm_node *node = mem->mm_node;
> > -   enum drm_mm_search_flags sflags = DRM_MM_SEARCH_BEST;
> > -   enum drm_mm_allocator_flags aflags = DRM_MM_CREATE_DEFAULT;
> > +   enum drm_mm_insert_mode mode;
> >     unsigned long fpfn, lpfn;
> >     int r;
> >  
> > @@ -115,15 +114,14 @@ int amdgpu_gtt_mgr_alloc(struct ttm_mem_type_manager *man,
> >     else
> >             lpfn = man->size;
> >  
> > -   if (place && place->flags & TTM_PL_FLAG_TOPDOWN) {
> > -           sflags = DRM_MM_SEARCH_BELOW;
> > -           aflags = DRM_MM_CREATE_TOP;
> > -   }
> > +   mode = DRM_MM_INSERT_BEST;
> > +   if (place && place->flags & TTM_PL_FLAG_TOPDOWN)
> > +           mode = DRM_MM_INSERT_HIGH;
> >  
> >     spin_lock(&mgr->lock);
> > -   r = drm_mm_insert_node_in_range_generic(&mgr->mm, node, mem->num_pages,
> > -                                           mem->page_alignment, 0,
> > -                                           fpfn, lpfn, sflags, aflags);
> > +   r = drm_mm_insert_node_in_range(&mgr->mm, node,
> > +                                   mem->num_pages, mem->page_alignment, 0,
> > +                                   fpfn, lpfn, mode);
> >     spin_unlock(&mgr->lock);
> >  
> >     if (!r) {
> > diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
> > index d710226a0fff..5f106ad815ce 100644
> > --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
> > +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
> > @@ -97,8 +97,7 @@ static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man,
> >     struct amdgpu_vram_mgr *mgr = man->priv;
> >     struct drm_mm *mm = &mgr->mm;
> >     struct drm_mm_node *nodes;
> > -   enum drm_mm_search_flags sflags = DRM_MM_SEARCH_DEFAULT;
> > -   enum drm_mm_allocator_flags aflags = DRM_MM_CREATE_DEFAULT;
> > +   enum drm_mm_insert_mode mode;
> >     unsigned long lpfn, num_nodes, pages_per_node, pages_left;
> >     unsigned i;
> >     int r;
> > @@ -121,10 +120,9 @@ static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man,
> >     if (!nodes)
> >             return -ENOMEM;
> >  
> > -   if (place->flags & TTM_PL_FLAG_TOPDOWN) {
> > -           sflags = DRM_MM_SEARCH_BELOW;
> > -           aflags = DRM_MM_CREATE_TOP;
> > -   }
> > +   mode = DRM_MM_INSERT_BEST;
> > +   if (place->flags & TTM_PL_FLAG_TOPDOWN)
> > +           mode = DRM_MM_INSERT_HIGH;
> >  
> >     pages_left = mem->num_pages;
> >  
> > @@ -135,13 +133,11 @@ static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man,
> >  
> >             if (pages == pages_per_node)
> >                     alignment = pages_per_node;
> > -           else
> > -                   sflags |= DRM_MM_SEARCH_BEST;
> >  
> > -           r = drm_mm_insert_node_in_range_generic(mm, &nodes[i], pages,
> > -                                                   alignment, 0,
> > -                                                   place->fpfn, lpfn,
> > -                                                   sflags, aflags);
> > +           r = drm_mm_insert_node_in_range(mm, &nodes[i],
> > +                                           pages, alignment, 0,
> > +                                           place->fpfn, lpfn,
> > +                                           mode);
> >             if (unlikely(r))
> >                     goto error;
> >  
> > diff --git a/drivers/gpu/drm/armada/armada_gem.c b/drivers/gpu/drm/armada/armada_gem.c
> > index 768087ddb046..65b029bc84b5 100644
> > --- a/drivers/gpu/drm/armada/armada_gem.c
> > +++ b/drivers/gpu/drm/armada/armada_gem.c
> > @@ -149,8 +149,8 @@ armada_gem_linear_back(struct drm_device *dev, struct armada_gem_object *obj)
> >                     return -ENOSPC;
> >  
> >             mutex_lock(&priv->linear_lock);
> > -           ret = drm_mm_insert_node(&priv->linear, node, size, align,
> > -                                    DRM_MM_SEARCH_DEFAULT);
> > +           ret = drm_mm_insert_node_generic(&priv->linear, node,
> > +                                            size, align, 0, 0);
> >             mutex_unlock(&priv->linear_lock);
> >             if (ret) {
> >                     kfree(node);
> > diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
> > index 58a7e3bbe130..960c9cd7dc23 100644
> > --- a/drivers/gpu/drm/drm_mm.c
> > +++ b/drivers/gpu/drm/drm_mm.c
> > @@ -92,14 +92,6 @@
> >   * some basic allocator dumpers for debugging.
> >   */
> >  
> > -static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
> > -                                           u64 size,
> > -                                           u64 alignment,
> > -                                           unsigned long color,
> > -                                           u64 start,
> > -                                           u64 end,
> > -                                           enum drm_mm_search_flags flags);
> > -
> >  #ifdef CONFIG_DRM_DEBUG_MM
> >  #include <linux/stackdepot.h>
> >  
> > @@ -221,69 +213,47 @@ static void drm_mm_interval_tree_add_node(struct drm_mm_node *hole_node,
> >                         &drm_mm_interval_tree_augment);
> >  }
> >  
> > -static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
> > -                            struct drm_mm_node *node,
> > -                            u64 size, u64 alignment,
> > -                            unsigned long color,
> > -                            u64 range_start, u64 range_end,
> > -                            enum drm_mm_allocator_flags flags)
> > +#define RB_INSERT(root, member, expr) do { \
> > +   struct rb_node **link = &root.rb_node, *rb = NULL; \
> > +   u64 x = expr(node); \
> > +   while (*link) { \
> > +           rb = *link; \
> > +           if (x < expr(rb_entry(rb, struct drm_mm_node, member))) \
> > +                   link = &rb->rb_left; \
> > +           else \
> > +                   link = &rb->rb_right; \
> > +   } \
> > +   rb_link_node(&node->member, rb, link); \
> > +   rb_insert_color(&node->member, &root); \
> > +} while (0)
> > +
> > +#define HOLE_SIZE(NODE) ((NODE)->hole_size)
> > +#define HOLE_ADDR(NODE) (__drm_mm_hole_node_start(NODE))
> > +
> > +static void add_hole(struct drm_mm_node *node)
> >  {
> > -   struct drm_mm *mm = hole_node->mm;
> > -   u64 hole_start = drm_mm_hole_node_start(hole_node);
> > -   u64 hole_end = drm_mm_hole_node_end(hole_node);
> > -   u64 adj_start = hole_start;
> > -   u64 adj_end = hole_end;
> > -
> > -   DRM_MM_BUG_ON(!drm_mm_hole_follows(hole_node) || node->allocated);
> > -
> > -   if (mm->color_adjust)
> > -           mm->color_adjust(hole_node, color, &adj_start, &adj_end);
> > -
> > -   adj_start = max(adj_start, range_start);
> > -   adj_end = min(adj_end, range_end);
> > -
> > -   if (flags & DRM_MM_CREATE_TOP)
> > -           adj_start = adj_end - size;
> > -
> > -   if (alignment) {
> > -           u64 rem;
> > -
> > -           div64_u64_rem(adj_start, alignment, &rem);
> > -           if (rem) {
> > -                   if (flags & DRM_MM_CREATE_TOP)
> > -                           adj_start -= rem;
> > -                   else
> > -                           adj_start += alignment - rem;
> > -           }
> > -   }
> > -
> > -   if (adj_start == hole_start) {
> > -           hole_node->hole_follows = 0;
> > -           list_del(&hole_node->hole_stack);
> > -   }
> > +   struct drm_mm *mm = node->mm;
> >  
> > -   node->start = adj_start;
> > -   node->size = size;
> > -   node->mm = mm;
> > -   node->color = color;
> > -   node->allocated = 1;
> > +   node->hole_size =
> > +           __drm_mm_hole_node_end(node) - __drm_mm_hole_node_start(node);
> > +   DRM_MM_BUG_ON(!drm_mm_hole_follows(node));
> >  
> > -   list_add(&node->node_list, &hole_node->node_list);
> > +   RB_INSERT(mm->holes_size, rb_hole_size, HOLE_SIZE);
> > +   RB_INSERT(mm->holes_addr, rb_hole_addr, HOLE_ADDR);
> >  
> > -   drm_mm_interval_tree_add_node(hole_node, node);
> > +   list_add(&node->hole_stack, &mm->hole_stack);
> > +}
> >  
> > -   DRM_MM_BUG_ON(node->start < range_start);
> > -   DRM_MM_BUG_ON(node->start < adj_start);
> > -   DRM_MM_BUG_ON(node->start + node->size > adj_end);
> > -   DRM_MM_BUG_ON(node->start + node->size > range_end);
> > +static void rm_hole(struct drm_mm_node *node)
> > +{
> > +   DRM_MM_BUG_ON(!drm_mm_hole_follows(node));
> >  
> > -   node->hole_follows = 0;
> > -   if (__drm_mm_hole_node_start(node) < hole_end) {
> > -           list_add(&node->hole_stack, &mm->hole_stack);
> > -           node->hole_follows = 1;
> > -   }
> > +   list_del(&node->hole_stack);
> > +   rb_erase(&node->rb_hole_size, &node->mm->holes_size);
> > +   rb_erase(&node->rb_hole_addr, &node->mm->holes_addr);
> > +   node->hole_size = 0;
> >  
> > -   save_stack(node);
> > +   DRM_MM_BUG_ON(drm_mm_hole_follows(node));
> >  }
> >  
> >  /**
> > @@ -313,7 +283,7 @@ int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
> >  
> >     /* Find the relevant hole to add our node to */
> >     hole = drm_mm_interval_tree_iter_first(&mm->interval_tree,
> > -                                          node->start, ~(u64)0);
> > +                                          node->start, U64_MAX);
> >     if (hole) {
> >             if (hole->start < end)
> >                     return -ENOSPC;
> > @@ -321,12 +291,12 @@ int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
> >             hole = list_entry(drm_mm_nodes(mm), typeof(*hole), node_list);
> >     }
> >  
> > -   hole = list_last_entry(&hole->node_list, typeof(*hole), node_list);
> > +   hole = list_prev_entry(hole, node_list);
> >     if (!drm_mm_hole_follows(hole))
> >             return -ENOSPC;
> >  
> >     adj_start = hole_start = __drm_mm_hole_node_start(hole);
> > -   adj_end = hole_end = __drm_mm_hole_node_end(hole);
> > +   adj_end = hole_end = hole_start + hole->hole_size;
> >  
> >     if (mm->color_adjust)
> >             mm->color_adjust(hole, node->color, &adj_start, &adj_end);
> > @@ -335,31 +305,128 @@ int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
> >             return -ENOSPC;
> >  
> >     node->mm = mm;
> > -   node->allocated = 1;
> >  
> >     list_add(&node->node_list, &hole->node_list);
> > -
> >     drm_mm_interval_tree_add_node(hole, node);
> > +   node->allocated = true;
> > +   node->hole_size = 0;
> > +
> > +   rm_hole(hole);
> > +   if (node->start > hole_start)
> > +           add_hole(hole);
> > +   if (end < hole_end)
> > +           add_hole(node);
> > +
> > +   save_stack(node);
> > +   return 0;
> > +}
> > +EXPORT_SYMBOL(drm_mm_reserve_node);
> > +
> > +static inline struct drm_mm_node *rb_hole_size_to_node(struct rb_node *rb)
> > +{
> > +   return rb_entry_safe(rb, struct drm_mm_node, rb_hole_size);
> > +}
> > +
> > +static inline struct drm_mm_node *rb_hole_addr_to_node(struct rb_node *rb)
> > +{
> > +   return rb_entry_safe(rb, struct drm_mm_node, rb_hole_addr);
> > +}
> >  
> > -   if (node->start == hole_start) {
> > -           hole->hole_follows = 0;
> > -           list_del(&hole->hole_stack);
> > +static inline u64 rb_hole_size(struct rb_node *rb)
> > +{
> > +   return rb_entry(rb, struct drm_mm_node, rb_hole_size)->hole_size;
> > +}
> > +
> > +static struct drm_mm_node *best_hole(struct drm_mm *mm, u64 size)
> > +{
> > +   struct rb_node *best = NULL;
> > +   struct rb_node **link = &mm->holes_size.rb_node;
> > +
> > +   while (*link) {
> > +           struct rb_node *rb = *link;
> > +           if (size <= rb_hole_size(rb)) {
> > +                   link = &rb->rb_left;
> > +                   best = rb;
> > +           } else {
> > +                   link = &rb->rb_right;
> > +           }
> >     }
> >  
> > -   node->hole_follows = 0;
> > -   if (end != hole_end) {
> > -           list_add(&node->hole_stack, &mm->hole_stack);
> > -           node->hole_follows = 1;
> > +   return rb_hole_size_to_node(best);
> > +}
> > +
> > +static struct drm_mm_node *find_hole(struct drm_mm *mm, u64 addr)
> > +{
> > +   struct drm_mm_node *node = NULL;
> > +   struct rb_node **link = &mm->holes_addr.rb_node;
> > +
> > +   while (*link) {
> > +           u64 hole_start;
> > +
> > +           node = rb_hole_addr_to_node(*link);
> > +           hole_start = __drm_mm_hole_node_start(node);
> > +
> > +           if (addr < hole_start)
> > +                   link = &node->rb_hole_addr.rb_left;
> > +           else if (addr > hole_start + node->hole_size)
> > +                   link = &node->rb_hole_addr.rb_right;
> > +           else
> > +                   break;
> >     }
> >  
> > -   save_stack(node);
> > +   return node;
> > +}
> >  
> > -   return 0;
> > +static struct drm_mm_node *
> > +first_hole(struct drm_mm *mm,
> > +      u64 start, u64 end, u64 size,
> > +      enum drm_mm_insert_mode mode)
> > +{
> > +   if (RB_EMPTY_ROOT(&mm->holes_size))
> > +           return NULL;
> > +
> > +   switch (mode) {
> > +   default:
> > +   case DRM_MM_INSERT_BEST:
> > +           return best_hole(mm, size);
> > +
> > +   case DRM_MM_INSERT_LOW:
> > +           return find_hole(mm, start);
> > +
> > +   case DRM_MM_INSERT_HIGH:
> > +           return find_hole(mm, end);
> > +
> > +   case DRM_MM_INSERT_EVICT:
> > +           return list_first_entry_or_null(&mm->hole_stack,
> > +                                           struct drm_mm_node,
> > +                                           hole_stack);
> > +   }
> > +}
> > +
> > +static struct drm_mm_node *
> > +next_hole(struct drm_mm *mm,
> > +     struct drm_mm_node *node,
> > +     enum drm_mm_insert_mode mode)
> > +{
> > +   switch (mode) {
> > +   default:
> > +   case DRM_MM_INSERT_BEST:
> > +           return rb_hole_size_to_node(rb_next(&node->rb_hole_size));
> > +
> > +   case DRM_MM_INSERT_LOW:
> > +           return rb_hole_addr_to_node(rb_next(&node->rb_hole_addr));
> > +
> > +   case DRM_MM_INSERT_HIGH:
> > +           return rb_hole_addr_to_node(rb_prev(&node->rb_hole_addr));
> > +
> > +   case DRM_MM_INSERT_EVICT:
> > +           node = list_next_entry(node, hole_stack);
> > +           return &node->hole_stack == &mm->hole_stack ? NULL : node;
> > +   }
> >  }
> > -EXPORT_SYMBOL(drm_mm_reserve_node);
> >  
> >  /**
> > - * drm_mm_insert_node_in_range_generic - ranged search for space and insert @node
> > + * drm_mm_insert_node_in_range - ranged search for space and insert @node
> >   * @mm: drm_mm to allocate from
> >   * @node: preallocate node to insert
> >   * @size: size of the allocation
> > @@ -367,38 +434,104 @@ EXPORT_SYMBOL(drm_mm_reserve_node);
> >   * @color: opaque tag value to use for this node
> >   * @start: start of the allowed range for this node
> >   * @end: end of the allowed range for this node
> > - * @sflags: flags to fine-tune the allocation search
> > - * @aflags: flags to fine-tune the allocation behavior
> > + * @mode: fine-tune the allocation search and placement
> >   *
> >   * The preallocated node must be cleared to 0.
> >   *
> >   * Returns:
> >   * 0 on success, -ENOSPC if there's no suitable hole.
> >   */
> > -int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, struct drm_mm_node *node,
> > -                                   u64 size, u64 alignment,
> > -                                   unsigned long color,
> > -                                   u64 start, u64 end,
> > -                                   enum drm_mm_search_flags sflags,
> > -                                   enum drm_mm_allocator_flags aflags)
> > +int drm_mm_insert_node_in_range(struct drm_mm * const mm,
> > +                           struct drm_mm_node * const node,
> > +                           u64 size, u64 alignment,
> > +                           unsigned long color,
> > +                           u64 range_start, u64 range_end,
> > +                           enum drm_mm_insert_mode mode)
> >  {
> > -   struct drm_mm_node *hole_node;
> > +   struct drm_mm_node *hole;
> > +   u64 remainder_mask;
> >  
> > -   if (WARN_ON(size == 0))
> > -           return -EINVAL;
> > +   DRM_MM_BUG_ON(range_start >= range_end);
> >  
> > -   hole_node = drm_mm_search_free_in_range_generic(mm,
> > -                                                   size, alignment, color,
> > -                                                   start, end, sflags);
> > -   if (!hole_node)
> > +   if (unlikely(size == 0 || range_end - range_start < size))
> >             return -ENOSPC;
> >  
> > -   drm_mm_insert_helper(hole_node, node,
> > -                        size, alignment, color,
> > -                        start, end, aflags);
> > -   return 0;
> > +   if (alignment <= 1)
> > +           alignment = 0;
> > +
> > +   remainder_mask = is_power_of_2(alignment) ? alignment - 1 : 0;
> > +   for (hole = first_hole(mm, range_start, range_end, size, mode); hole;
> > +        hole = next_hole(mm, hole, mode)) {
> > +           u64 hole_start = __drm_mm_hole_node_start(hole);
> > +           u64 hole_end = hole_start + hole->hole_size;
> > +           u64 adj_start, adj_end;
> > +           u64 col_start, col_end;
> > +
> > +           if (mode == DRM_MM_INSERT_LOW && hole_start >= range_end)
> > +                   break;
> > +
> > +           if (mode == DRM_MM_INSERT_HIGH && hole_end <= range_start)
> > +                   break;
> > +
> > +           col_start = hole_start;
> > +           col_end = hole_end;
> > +           if (mm->color_adjust)
> > +                   mm->color_adjust(hole, color, &col_start, &col_end);
> > +
> > +           adj_start = max(col_start, range_start);
> > +           adj_end = min(col_end, range_end);
> > +
> > +           if (adj_end <= adj_start || adj_end - adj_start < size)
> > +                   continue;
> > +
> > +           if (mode == DRM_MM_INSERT_HIGH)
> > +                   adj_start = adj_end - size;
> > +
> > +           if (alignment) {
> > +                   u64 rem;
> > +
> > +                   if (likely(remainder_mask))
> > +                           rem = adj_start & remainder_mask;
> > +                   else
> > +                           div64_u64_rem(adj_start, alignment, &rem);
> > +                   if (rem) {
> > +                           adj_start -= rem;
> > +                           if (mode != DRM_MM_INSERT_HIGH)
> > +                                   adj_start += alignment;
> > +
> > +                           if (adj_start < max(col_start, range_start) ||
> > +                               min(col_end, range_end) - adj_start < size)
> > +                                   continue;
> > +
> > +                           if (adj_end <= adj_start ||
> > +                               adj_end - adj_start < size)
> > +                                   continue;
> > +                   }
> > +           }
> > +
> > +           node->mm = mm;
> > +           node->size = size;
> > +           node->start = adj_start;
> > +           node->color = color;
> > +           node->hole_size = 0;
> > +
> > +           list_add(&node->node_list, &hole->node_list);
> > +           drm_mm_interval_tree_add_node(hole, node);
> > +           node->allocated = true;
> > +
> > +           rm_hole(hole);
> > +           if (adj_start > hole_start)
> > +                   add_hole(hole);
> > +           if (adj_start + size < hole_end)
> > +                   add_hole(node);
> > +
> > +           save_stack(node);
> > +           return 0;
> > +   }
> > +
> > +   return -ENOSPC;
> >  }
> > -EXPORT_SYMBOL(drm_mm_insert_node_in_range_generic);
> > +EXPORT_SYMBOL(drm_mm_insert_node_in_range);
> >  
> >  /**
> >   * drm_mm_remove_node - Remove a memory node from the allocator.
> > @@ -416,92 +549,20 @@ void drm_mm_remove_node(struct drm_mm_node *node)
> >     DRM_MM_BUG_ON(!node->allocated);
> >     DRM_MM_BUG_ON(node->scanned_block);
> >  
> > -   prev_node =
> > -       list_entry(node->node_list.prev, struct drm_mm_node, node_list);
> > -
> > -   if (drm_mm_hole_follows(node)) {
> > -           DRM_MM_BUG_ON(__drm_mm_hole_node_start(node) ==
> > -                         __drm_mm_hole_node_end(node));
> > -           list_del(&node->hole_stack);
> > -   } else {
> > -           DRM_MM_BUG_ON(__drm_mm_hole_node_start(node) !=
> > -                         __drm_mm_hole_node_end(node));
> > -   }
> > +   prev_node = list_prev_entry(node, node_list);
> >  
> > -   if (!drm_mm_hole_follows(prev_node)) {
> > -           prev_node->hole_follows = 1;
> > -           list_add(&prev_node->hole_stack, &mm->hole_stack);
> > -   } else
> > -           list_move(&prev_node->hole_stack, &mm->hole_stack);
> > +   if (drm_mm_hole_follows(node))
> > +           rm_hole(node);
> >  
> >     drm_mm_interval_tree_remove(node, &mm->interval_tree);
> >     list_del(&node->node_list);
> > -   node->allocated = 0;
> > -}
> > -EXPORT_SYMBOL(drm_mm_remove_node);
> > -
> > -static int check_free_hole(u64 start, u64 end, u64 size, u64 alignment)
> > -{
> > -   if (end - start < size)
> > -           return 0;
> > -
> > -   if (alignment) {
> > -           u64 rem;
> > -
> > -           div64_u64_rem(start, alignment, &rem);
> > -           if (rem)
> > -                   start += alignment - rem;
> > -   }
> > -
> > -   return end >= start + size;
> > -}
> > -
> > -static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
> > -                                                   u64 size,
> > -                                                   u64 alignment,
> > -                                                   unsigned long color,
> > -                                                   u64 start,
> > -                                                   u64 end,
> > -                                                   enum drm_mm_search_flags flags)
> > -{
> > -   struct drm_mm_node *entry;
> > -   struct drm_mm_node *best;
> > -   u64 adj_start;
> > -   u64 adj_end;
> > -   u64 best_size;
> > -
> > -   DRM_MM_BUG_ON(mm->scan_active);
> > -
> > -   best = NULL;
> > -   best_size = ~0UL;
> > -
> > -   __drm_mm_for_each_hole(entry, mm, adj_start, adj_end,
> > -                          flags & DRM_MM_SEARCH_BELOW) {
> > -           u64 hole_size = adj_end - adj_start;
> > -
> > -           if (mm->color_adjust) {
> > -                   mm->color_adjust(entry, color, &adj_start, &adj_end);
> > -                   if (adj_end <= adj_start)
> > -                           continue;
> > -           }
> > -
> > -           adj_start = max(adj_start, start);
> > -           adj_end = min(adj_end, end);
> > -
> > -           if (!check_free_hole(adj_start, adj_end, size, alignment))
> > -                   continue;
> > -
> > -           if (!(flags & DRM_MM_SEARCH_BEST))
> > -                   return entry;
> > -
> > -           if (hole_size < best_size) {
> > -                   best = entry;
> > -                   best_size = hole_size;
> > -           }
> > -   }
> > +   node->allocated = false;
> >  
> > -   return best;
> > +   if (drm_mm_hole_follows(prev_node))
> > +           rm_hole(prev_node);
> > +   add_hole(prev_node);
> >  }
> > +EXPORT_SYMBOL(drm_mm_remove_node);
> >  
> >  /**
> >   * drm_mm_replace_node - move an allocation from @old to @new
> > @@ -516,18 +577,23 @@ void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
> >  {
> >     DRM_MM_BUG_ON(!old->allocated);
> >  
> > +   *new = *old;
> > +
> >     list_replace(&old->node_list, &new->node_list);
> > -   list_replace(&old->hole_stack, &new->hole_stack);
> >     rb_replace_node(&old->rb, &new->rb, &old->mm->interval_tree);
> > -   new->hole_follows = old->hole_follows;
> > -   new->mm = old->mm;
> > -   new->start = old->start;
> > -   new->size = old->size;
> > -   new->color = old->color;
> > -   new->__subtree_last = old->__subtree_last;
> > -
> > -   old->allocated = 0;
> > -   new->allocated = 1;
> > +
> > +   if (drm_mm_hole_follows(old)) {
> > +           list_replace(&old->hole_stack, &new->hole_stack);
> > +           rb_replace_node(&old->rb_hole_size,
> > +                           &new->rb_hole_size,
> > +                           &old->mm->holes_size);
> > +           rb_replace_node(&old->rb_hole_addr,
> > +                           &new->rb_hole_addr,
> > +                           &old->mm->holes_addr);
> > +   }
> > +
> > +   old->allocated = false;
> > +   new->allocated = true;
> >  }
> >  EXPORT_SYMBOL(drm_mm_replace_node);
> >  
> > @@ -570,7 +636,7 @@ EXPORT_SYMBOL(drm_mm_replace_node);
> >   * @color: opaque tag value to use for the allocation
> >   * @start: start of the allowed range for the allocation
> >   * @end: end of the allowed range for the allocation
> > - * @flags: flags to specify how the allocation will be performed afterwards
> > + * @mode: fine-tune the allocation search and placement
> >   *
> >   * This simply sets up the scanning routines with the parameters for the desired
> >   * hole.
> > @@ -586,7 +652,7 @@ void drm_mm_scan_init_with_range(struct drm_mm_scan *scan,
> >                              unsigned long color,
> >                              u64 start,
> >                              u64 end,
> > -                            unsigned int flags)
> > +                            enum drm_mm_insert_mode mode)
> >  {
> >     DRM_MM_BUG_ON(start >= end);
> >     DRM_MM_BUG_ON(!size || size > end - start);
> > @@ -601,7 +667,7 @@ void drm_mm_scan_init_with_range(struct drm_mm_scan *scan,
> >     scan->alignment = alignment;
> >     scan->remainder_mask = is_power_of_2(alignment) ? alignment - 1 : 0;
> >     scan->size = size;
> > -   scan->flags = flags;
> > +   scan->mode = mode;
> >  
> >     DRM_MM_BUG_ON(end <= start);
> >     scan->range_start = start;
> > @@ -659,7 +725,7 @@ bool drm_mm_scan_add_block(struct drm_mm_scan *scan,
> >     if (adj_end <= adj_start || adj_end - adj_start < scan->size)
> >             return false;
> >  
> > -   if (scan->flags == DRM_MM_CREATE_TOP)
> > +   if (scan->mode == DRM_MM_INSERT_HIGH)
> >             adj_start = adj_end - scan->size;
> >  
> >     if (scan->alignment) {
> > @@ -671,7 +737,7 @@ bool drm_mm_scan_add_block(struct drm_mm_scan *scan,
> >                     div64_u64_rem(adj_start, scan->alignment, &rem);
> >             if (rem) {
> >                     adj_start -= rem;
> > -                   if (scan->flags != DRM_MM_CREATE_TOP)
> > +                   if (scan->mode != DRM_MM_INSERT_HIGH)
> >                             adj_start += scan->alignment;
> >                     if (adj_start < max(col_start, scan->range_start) ||
> >                         min(col_end, scan->range_end) - adj_start < scan->size)
> > @@ -765,7 +831,7 @@ struct drm_mm_node *drm_mm_scan_color_evict(struct drm_mm_scan *scan)
> >  
> >     hole = list_first_entry(&mm->hole_stack, typeof(*hole), hole_stack);
> >     hole_start = __drm_mm_hole_node_start(hole);
> > -   hole_end = __drm_mm_hole_node_end(hole);
> > +   hole_end = hole_start + hole->hole_size;
> >  
> >     DRM_MM_BUG_ON(hole_start > scan->hit_start);
> >     DRM_MM_BUG_ON(hole_end < scan->hit_end);
> > @@ -792,21 +858,22 @@ void drm_mm_init(struct drm_mm *mm, u64 start, u64 size)
> >  {
> >     DRM_MM_BUG_ON(start + size <= start);
> >  
> > +   mm->color_adjust = NULL;
> > +
> >     INIT_LIST_HEAD(&mm->hole_stack);
> > -   mm->scan_active = 0;
> > +   mm->interval_tree = RB_ROOT;
> > +   mm->holes_size = RB_ROOT;
> > +   mm->holes_addr = RB_ROOT;
> >  
> >     /* Clever trick to avoid a special case in the free hole tracking. */
> >     INIT_LIST_HEAD(&mm->head_node.node_list);
> > -   mm->head_node.allocated = 0;
> > -   mm->head_node.hole_follows = 1;
> > +   mm->head_node.allocated = false;
> >     mm->head_node.mm = mm;
> >     mm->head_node.start = start + size;
> > -   mm->head_node.size = start - mm->head_node.start;
> > -   list_add_tail(&mm->head_node.hole_stack, &mm->hole_stack);
> > -
> > -   mm->interval_tree = RB_ROOT;
> > +   mm->head_node.size = -size;
> > +   add_hole(&mm->head_node);
> >  
> > -   mm->color_adjust = NULL;
> > +   mm->scan_active = 0;
> >  }
> >  EXPORT_SYMBOL(drm_mm_init);
> >  
> > @@ -828,18 +895,16 @@ EXPORT_SYMBOL(drm_mm_takedown);
> >  static u64 drm_mm_debug_hole(const struct drm_mm_node *entry,
> >                          const char *prefix)
> >  {
> > -   u64 hole_start, hole_end, hole_size;
> > -
> > -   if (entry->hole_follows) {
> > -           hole_start = drm_mm_hole_node_start(entry);
> > -           hole_end = drm_mm_hole_node_end(entry);
> > -           hole_size = hole_end - hole_start;
> > -           pr_debug("%s %#llx-%#llx: %llu: free\n", prefix, hole_start,
> > -                    hole_end, hole_size);
> > -           return hole_size;
> > +   u64 start, size;
> > +
> > +   size = entry->hole_size;
> > +   if (size) {
> > +           start = drm_mm_hole_node_start(entry);
> > +           pr_debug("%s %#llx-%#llx: %llu: free\n",
> > +                    prefix, start, start + size, size);
> >     }
> >  
> > -   return 0;
> > +   return size;
> >  }
> >  
> >  /**
> > @@ -870,18 +935,16 @@ EXPORT_SYMBOL(drm_mm_debug_table);
> >  #if defined(CONFIG_DEBUG_FS)
> >  static u64 drm_mm_dump_hole(struct seq_file *m, const struct drm_mm_node *entry)
> >  {
> > -   u64 hole_start, hole_end, hole_size;
> > -
> > -   if (entry->hole_follows) {
> > -           hole_start = drm_mm_hole_node_start(entry);
> > -           hole_end = drm_mm_hole_node_end(entry);
> > -           hole_size = hole_end - hole_start;
> > -           seq_printf(m, "%#018llx-%#018llx: %llu: free\n", hole_start,
> > -                      hole_end, hole_size);
> > -           return hole_size;
> > +   u64 start, size;
> > +
> > +   size = entry->hole_size;
> > +   if (size) {
> > +           start = drm_mm_hole_node_start(entry);
> > +           seq_printf(m, "%#018llx-%#018llx: %llu: free\n",
> > +                      start, start + size, size);
> >     }
> >  
> > -   return 0;
> > +   return size;
> >  }
> >  
> >  /**
> > diff --git a/drivers/gpu/drm/drm_vma_manager.c b/drivers/gpu/drm/drm_vma_manager.c
> > index 20cc33d1bfc1..d9100b565198 100644
> > --- a/drivers/gpu/drm/drm_vma_manager.c
> > +++ b/drivers/gpu/drm/drm_vma_manager.c
> > @@ -212,8 +212,7 @@ int drm_vma_offset_add(struct drm_vma_offset_manager *mgr,
> >             goto out_unlock;
> >     }
> >  
> > -   ret = drm_mm_insert_node(&mgr->vm_addr_space_mm, &node->vm_node,
> > -                            pages, 0, DRM_MM_SEARCH_DEFAULT);
> > +   ret = drm_mm_insert_node(&mgr->vm_addr_space_mm, &node->vm_node, pages);
> >     if (ret)
> >             goto out_unlock;
> >  
> > diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
> > index 2dae3169ce48..69ab98c143dc 100644
> > --- a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
> > +++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
> > @@ -107,6 +107,7 @@ static int etnaviv_iommu_find_iova(struct etnaviv_iommu *mmu,
> >                                struct drm_mm_node *node, size_t size)
> >  {
> >     struct etnaviv_vram_mapping *free = NULL;
> > +   enum drm_mm_insert_mode mode = DRM_MM_INSERT_LOW;
> >     int ret;
> >  
> >     lockdep_assert_held(&mmu->lock);
> > @@ -118,8 +119,9 @@ static int etnaviv_iommu_find_iova(struct etnaviv_iommu *mmu,
> >             bool found;
> >  
> >             ret = drm_mm_insert_node_in_range(&mmu->mm, node,
> > -                   size, 0, mmu->last_iova, ~0UL,
> > -                   DRM_MM_SEARCH_DEFAULT);
> > +                                             size, 0, 0,
> > +                                             mmu->last_iova, U64_MAX,
> > +                                             mode);
> >  
> >             if (ret != -ENOSPC)
> >                     break;
> > @@ -187,6 +189,8 @@ static int etnaviv_iommu_find_iova(struct etnaviv_iommu *mmu,
> >                     list_del_init(&m->scan_node);
> >             }
> >  
> > +           mode = DRM_MM_INSERT_EVICT;
> > +
> >             /*
> >              * We removed enough mappings so that the new allocation will
> >              * succeed.  Ensure that the MMU will be flushed before the
> > diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
> > index 5275f6248ce3..cc4e0224968f 100644
> > --- a/drivers/gpu/drm/i915/i915_gem.c
> > +++ b/drivers/gpu/drm/i915/i915_gem.c
> > @@ -69,12 +69,10 @@ insert_mappable_node(struct i915_ggtt *ggtt,
> >                       struct drm_mm_node *node, u32 size)
> >  {
> >     memset(node, 0, sizeof(*node));
> > -   return drm_mm_insert_node_in_range_generic(&ggtt->base.mm, node,
> > -                                              size, 0,
> > -                                              I915_COLOR_UNEVICTABLE,
> > -                                              0, ggtt->mappable_end,
> > -                                              DRM_MM_SEARCH_DEFAULT,
> > -                                              DRM_MM_CREATE_DEFAULT);
> > +   return drm_mm_insert_node_in_range(&ggtt->base.mm, node,
> > +                                      size, 0, I915_COLOR_UNEVICTABLE,
> > +                                      0, ggtt->mappable_end,
> > +                                      DRM_MM_INSERT_LOW);
> >  }
> >  
> >  static void
> > diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
> > index 50129ec1caab..f92f63e8749a 100644
> > --- a/drivers/gpu/drm/i915/i915_gem_evict.c
> > +++ b/drivers/gpu/drm/i915/i915_gem_evict.c
> > @@ -109,6 +109,7 @@ i915_gem_evict_something(struct i915_address_space *vm,
> >     }, **phase;
> >     struct i915_vma *vma, *next;
> >     struct drm_mm_node *node;
> > +   enum drm_mm_insert_mode mode;
> >     int ret;
> >  
> >     lockdep_assert_held(&vm->i915->drm.struct_mutex);
> > @@ -127,10 +128,14 @@ i915_gem_evict_something(struct i915_address_space *vm,
> >      * On each list, the oldest objects lie at the HEAD with the freshest
> >      * object on the TAIL.
> >      */
> > +   mode = DRM_MM_INSERT_BEST;
> > +   if (flags & PIN_HIGH)
> > +           mode = DRM_MM_INSERT_HIGH;
> > +   if (flags & PIN_MAPPABLE)
> > +           mode = DRM_MM_INSERT_LOW;
> >     drm_mm_scan_init_with_range(&scan, &vm->mm,
> >                                 min_size, alignment, cache_level,
> > -                               start, end,
> > -                               flags & PIN_HIGH ? DRM_MM_CREATE_TOP : 0);
> > +                               start, end, mode);
> >  
> >     /* Retire before we search the active list. Although we have
> >      * reasonable accuracy in our retirement lists, we may have
> > diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
> > index c64438f8171c..3e435caf7221 100644
> > --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
> > +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
> > @@ -436,12 +436,11 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj,
> >                                            PIN_MAPPABLE | PIN_NONBLOCK);
> >             if (IS_ERR(vma)) {
> >                     memset(&cache->node, 0, sizeof(cache->node));
> > -                   ret = drm_mm_insert_node_in_range_generic
> > +                   ret = drm_mm_insert_node_in_range
> >                             (&ggtt->base.mm, &cache->node,
> >                              4096, 0, I915_COLOR_UNEVICTABLE,
> >                              0, ggtt->mappable_end,
> > -                            DRM_MM_SEARCH_DEFAULT,
> > -                            DRM_MM_CREATE_DEFAULT);
> > +                            DRM_MM_INSERT_LOW);
> >                     if (ret) /* no inactive aperture space, use cpu reloc */
> >                             return NULL;
> >             } else {
> > diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
> > index c8f1675852a7..939b862bfcfa 100644
> > --- a/drivers/gpu/drm/i915/i915_gem_gtt.c
> > +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
> > @@ -2722,12 +2722,10 @@ int i915_gem_init_ggtt(struct drm_i915_private *dev_priv)
> >             return ret;
> >  
> >     /* Reserve a mappable slot for our lockless error capture */
> > -   ret = drm_mm_insert_node_in_range_generic(&ggtt->base.mm,
> > -                                             &ggtt->error_capture,
> > -                                             4096, 0,
> > -                                             I915_COLOR_UNEVICTABLE,
> > -                                             0, ggtt->mappable_end,
> > -                                             0, 0);
> > +   ret = drm_mm_insert_node_in_range(&ggtt->base.mm, &ggtt->error_capture,
> > +                                     4096, 0, I915_COLOR_UNEVICTABLE,
> > +                                     0, ggtt->mappable_end,
> > +                                     DRM_MM_INSERT_LOW);
> >     if (ret)
> >             return ret;
> >  
> > @@ -3542,18 +3540,16 @@ int i915_gem_gtt_insert(struct i915_address_space *vm,
> >                     u64 size, u64 alignment, unsigned long color,
> >                     u64 start, u64 end, unsigned int flags)
> >  {
> > -   u32 search_flag, alloc_flag;
> > +   enum drm_mm_insert_mode mode;
> >     int err;
> >  
> >     lockdep_assert_held(&vm->i915->drm.struct_mutex);
> >  
> > -   if (flags & PIN_HIGH) {
> > -           search_flag = DRM_MM_SEARCH_BELOW;
> > -           alloc_flag = DRM_MM_CREATE_TOP;
> > -   } else {
> > -           search_flag = DRM_MM_SEARCH_DEFAULT;
> > -           alloc_flag = DRM_MM_CREATE_DEFAULT;
> > -   }
> > +   mode = DRM_MM_INSERT_BEST;
> > +   if (flags & PIN_HIGH)
> > +           mode = DRM_MM_INSERT_HIGH;
> > +   if (flags & PIN_MAPPABLE)
> > +           mode = DRM_MM_INSERT_LOW;
> >  
> >     /* We only allocate in PAGE_SIZE/GTT_PAGE_SIZE (4096) chunks,
> >      * so we know that we always have a minimum alignment of 4096.
> > @@ -3565,10 +3561,9 @@ int i915_gem_gtt_insert(struct i915_address_space *vm,
> >     if (alignment <= 4096)
> >             alignment = 0;
> >  
> > -   err = drm_mm_insert_node_in_range_generic(&vm->mm, node,
> > -                                             size, alignment, color,
> > -                                             start, end,
> > -                                             search_flag, alloc_flag);
> > +   err = drm_mm_insert_node_in_range(&vm->mm, node,
> > +                                     size, alignment, color,
> > +                                     start, end, mode);
> >     if (err != -ENOSPC)
> >             return err;
> >  
> > @@ -3577,9 +3572,7 @@ int i915_gem_gtt_insert(struct i915_address_space *vm,
> >     if (err)
> >             return err;
> >  
> > -   search_flag = DRM_MM_SEARCH_DEFAULT;
> > -   return drm_mm_insert_node_in_range_generic(&vm->mm, node,
> > -                                              size, alignment, color,
> > -                                              start, end,
> > -                                              search_flag, alloc_flag);
> > +   return drm_mm_insert_node_in_range(&vm->mm, node,
> > +                                      size, alignment, color,
> > +                                      start, end, DRM_MM_INSERT_EVICT);
> >  }
> > diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
> > index f1a80bfa9919..f6d507faf83b 100644
> > --- a/drivers/gpu/drm/i915/i915_gem_stolen.c
> > +++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
> > @@ -55,9 +55,9 @@ int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv,
> >             return -ENODEV;
> >  
> >     mutex_lock(&dev_priv->mm.stolen_lock);
> > -   ret = drm_mm_insert_node_in_range(&dev_priv->mm.stolen, node, size,
> > -                                     alignment, start, end,
> > -                                     DRM_MM_SEARCH_DEFAULT);
> > +   ret = drm_mm_insert_node_in_range(&dev_priv->mm.stolen, node,
> > +                                     size, alignment, 0,
> > +                                     start, end, DRM_MM_INSERT_BEST);
> >     mutex_unlock(&dev_priv->mm.stolen_lock);
> >  
> >     return ret;
> > diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
> > index cd06cfd94687..412669062cb7 100644
> > --- a/drivers/gpu/drm/msm/msm_gem.c
> > +++ b/drivers/gpu/drm/msm/msm_gem.c
> > @@ -54,8 +54,7 @@ static struct page **get_pages_vram(struct drm_gem_object *obj,
> >     if (!p)
> >             return ERR_PTR(-ENOMEM);
> >  
> > -   ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node,
> > -                   npages, 0, DRM_MM_SEARCH_DEFAULT);
> > +   ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node, npages);
> >     if (ret) {
> >             drm_free_large(p);
> >             return ERR_PTR(ret);
> > diff --git a/drivers/gpu/drm/msm/msm_gem_vma.c b/drivers/gpu/drm/msm/msm_gem_vma.c
> > index a311d26ccb21..b654eca7636a 100644
> > --- a/drivers/gpu/drm/msm/msm_gem_vma.c
> > +++ b/drivers/gpu/drm/msm/msm_gem_vma.c
> > @@ -45,8 +45,7 @@ msm_gem_map_vma(struct msm_gem_address_space *aspace,
> >     if (WARN_ON(drm_mm_node_allocated(&vma->node)))
> >             return 0;
> >  
> > -   ret = drm_mm_insert_node(&aspace->mm, &vma->node, npages,
> > -                   0, DRM_MM_SEARCH_DEFAULT);
> > +   ret = drm_mm_insert_node(&aspace->mm, &vma->node, npages);
> >     if (ret)
> >             return ret;
> >  
> > diff --git a/drivers/gpu/drm/selftests/test-drm_mm.c b/drivers/gpu/drm/selftests/test-drm_mm.c
> > index 2ce92f4dcfc7..a07c04a6c4b9 100644
> > --- a/drivers/gpu/drm/selftests/test-drm_mm.c
> > +++ b/drivers/gpu/drm/selftests/test-drm_mm.c
> > @@ -22,23 +22,24 @@ static unsigned int max_iterations = 8192;
> >  static unsigned int max_prime = 128;
> >  
> >  enum {
> > -   DEFAULT,
> > -   TOPDOWN,
> >     BEST,
> > +   BOTTOMUP,
> > +   TOPDOWN,
> > +   EVICT,
> >  };
> >  
> >  static const struct insert_mode {
> >     const char *name;
> > -   unsigned int search_flags;
> > -   unsigned int create_flags;
> > +   enum drm_mm_insert_mode mode;
> >  } insert_modes[] = {
> > -   [DEFAULT] = { "default", DRM_MM_SEARCH_DEFAULT, DRM_MM_CREATE_DEFAULT },
> > -   [TOPDOWN] = { "top-down", DRM_MM_SEARCH_BELOW, DRM_MM_CREATE_TOP },
> > -   [BEST] = { "best", DRM_MM_SEARCH_BEST, DRM_MM_CREATE_DEFAULT },
> > +   [BEST] = { "best", DRM_MM_INSERT_BEST },
> > +   [BOTTOMUP] = { "bottom-up", DRM_MM_INSERT_LOW },
> > +   [TOPDOWN] = { "top-down", DRM_MM_INSERT_HIGH },
> > +   [EVICT] = { "evict", DRM_MM_INSERT_EVICT },
> >     {}
> >  }, evict_modes[] = {
> > -   { "default", DRM_MM_SEARCH_DEFAULT, DRM_MM_CREATE_DEFAULT },
> > -   { "top-down", DRM_MM_SEARCH_BELOW, DRM_MM_CREATE_TOP },
> > +   { "bottom-up", DRM_MM_INSERT_LOW },
> > +   { "top-down", DRM_MM_INSERT_HIGH },
> >     {}
> >  };
> >  
> > @@ -522,8 +523,7 @@ static bool expect_insert(struct drm_mm *mm, struct drm_mm_node *node,
> >  
> >     err = drm_mm_insert_node_generic(mm, node,
> >                                      size, alignment, color,
> > -                                    mode->search_flags,
> > -                                    mode->create_flags);
> > +                                    mode->mode);
> >     if (err) {
> >             pr_err("insert (size=%llu, alignment=%llu, color=%lu, mode=%s) failed with err=%d\n",
> >                    size, alignment, color, mode->name, err);
> > @@ -543,7 +543,7 @@ static bool expect_insert_fail(struct drm_mm *mm, u64 size)
> >     struct drm_mm_node tmp = {};
> >     int err;
> >  
> > -   err = drm_mm_insert_node(mm, &tmp, size, 0, DRM_MM_SEARCH_DEFAULT);
> > +   err = drm_mm_insert_node(mm, &tmp, size);
> >     if (likely(err == -ENOSPC))
> >             return true;
> >  
> > @@ -749,11 +749,10 @@ static bool expect_insert_in_range(struct drm_mm *mm, struct drm_mm_node *node,
> >  {
> >     int err;
> >  
> > -   err = drm_mm_insert_node_in_range_generic(mm, node,
> > -                                             size, alignment, color,
> > -                                             range_start, range_end,
> > -                                             mode->search_flags,
> > -                                             mode->create_flags);
> > +   err = drm_mm_insert_node_in_range(mm, node,
> > +                                     size, alignment, color,
> > +                                     range_start, range_end,
> > +                                     mode->mode);
> >     if (err) {
> >             pr_err("insert (size=%llu, alignment=%llu, color=%lu, mode=%s) nto range [%llx, %llx] failed with err=%d\n",
> >                    size, alignment, color, mode->name,
> > @@ -777,11 +776,10 @@ static bool expect_insert_in_range_fail(struct drm_mm *mm,
> >     struct drm_mm_node tmp = {};
> >     int err;
> >  
> > -   err = drm_mm_insert_node_in_range_generic(mm, &tmp,
> > -                                             size, 0, 0,
> > -                                             range_start, range_end,
> > -                                             DRM_MM_SEARCH_DEFAULT,
> > -                                             DRM_MM_CREATE_DEFAULT);
> > +   err = drm_mm_insert_node_in_range(mm, &tmp,
> > +                                     size, 0, 0,
> > +                                     range_start, range_end,
> > +                                     0);
> >     if (likely(err == -ENOSPC))
> >             return true;
> >  
> > @@ -1314,7 +1312,7 @@ static int evict_something(struct drm_mm *mm,
> >     drm_mm_scan_init_with_range(&scan, mm,
> >                                 size, alignment, 0,
> >                                 range_start, range_end,
> > -                               mode->create_flags);
> > +                               mode->mode);
> >     if (!evict_nodes(&scan,
> >                      nodes, order, count, false,
> >                      &evict_list))
> > @@ -1322,8 +1320,7 @@ static int evict_something(struct drm_mm *mm,
> >  
> >     memset(&tmp, 0, sizeof(tmp));
> >     err = drm_mm_insert_node_generic(mm, &tmp, size, alignment, 0,
> > -                                    mode->search_flags,
> > -                                    mode->create_flags);
> > +                                    DRM_MM_INSERT_EVICT);
> >     if (err) {
> >             pr_err("Failed to insert into eviction hole: size=%d, align=%d\n",
> >                    size, alignment);
> > @@ -1398,8 +1395,7 @@ static int igt_evict(void *ignored)
> >     ret = -EINVAL;
> >     drm_mm_init(&mm, 0, size);
> >     for (n = 0; n < size; n++) {
> > -           err = drm_mm_insert_node(&mm, &nodes[n].node, 1, 0,
> > -                                    DRM_MM_SEARCH_DEFAULT);
> > +           err = drm_mm_insert_node(&mm, &nodes[n].node, 1);
> >             if (err) {
> >                     pr_err("insert failed, step %d\n", n);
> >                     ret = err;
> > @@ -1507,8 +1503,7 @@ static int igt_evict_range(void *ignored)
> >     ret = -EINVAL;
> >     drm_mm_init(&mm, 0, size);
> >     for (n = 0; n < size; n++) {
> > -           err = drm_mm_insert_node(&mm, &nodes[n].node, 1, 0,
> > -                                    DRM_MM_SEARCH_DEFAULT);
> > +           err = drm_mm_insert_node(&mm, &nodes[n].node, 1);
> >             if (err) {
> >                     pr_err("insert failed, step %d\n", n);
> >                     ret = err;
> > @@ -1894,7 +1889,7 @@ static int evict_color(struct drm_mm *mm,
> >     drm_mm_scan_init_with_range(&scan, mm,
> >                                 size, alignment, color,
> >                                 range_start, range_end,
> > -                               mode->create_flags);
> > +                               mode->mode);
> >     if (!evict_nodes(&scan,
> >                      nodes, order, count, true,
> >                      &evict_list))
> > @@ -1902,8 +1897,7 @@ static int evict_color(struct drm_mm *mm,
> >  
> >     memset(&tmp, 0, sizeof(tmp));
> >     err = drm_mm_insert_node_generic(mm, &tmp, size, alignment, color,
> > -                                    mode->search_flags,
> > -                                    mode->create_flags);
> > +                                    DRM_MM_INSERT_EVICT);
> >     if (err) {
> >             pr_err("Failed to insert into eviction hole: size=%d, align=%d, color=%lu, err=%d\n",
> >                    size, alignment, color, err);
> > diff --git a/drivers/gpu/drm/sis/sis_mm.c b/drivers/gpu/drm/sis/sis_mm.c
> > index 03defda77766..1622db24cd39 100644
> > --- a/drivers/gpu/drm/sis/sis_mm.c
> > +++ b/drivers/gpu/drm/sis/sis_mm.c
> > @@ -109,8 +109,7 @@ static int sis_drm_alloc(struct drm_device *dev, struct drm_file *file,
> >     if (pool == AGP_TYPE) {
> >             retval = drm_mm_insert_node(&dev_priv->agp_mm,
> >                                         &item->mm_node,
> > -                                       mem->size, 0,
> > -                                       DRM_MM_SEARCH_DEFAULT);
> > +                                       mem->size);
> >             offset = item->mm_node.start;
> >     } else {
> >  #if defined(CONFIG_FB_SIS) || defined(CONFIG_FB_SIS_MODULE)
> > @@ -122,8 +121,7 @@ static int sis_drm_alloc(struct drm_device *dev, struct drm_file *file,
> >  #else
> >             retval = drm_mm_insert_node(&dev_priv->vram_mm,
> >                                         &item->mm_node,
> > -                                       mem->size, 0,
> > -                                       DRM_MM_SEARCH_DEFAULT);
> > +                                       mem->size);
> >             offset = item->mm_node.start;
> >  #endif
> >     }
> > diff --git a/drivers/gpu/drm/tegra/gem.c b/drivers/gpu/drm/tegra/gem.c
> > index c08e5279eeac..a6051d6c6de6 100644
> > --- a/drivers/gpu/drm/tegra/gem.c
> > +++ b/drivers/gpu/drm/tegra/gem.c
> > @@ -128,8 +128,8 @@ static int tegra_bo_iommu_map(struct tegra_drm *tegra, struct tegra_bo *bo)
> >     if (!bo->mm)
> >             return -ENOMEM;
> >  
> > -   err = drm_mm_insert_node_generic(&tegra->mm, bo->mm, bo->gem.size,
> > -                                    PAGE_SIZE, 0, 0, 0);
> > +   err = drm_mm_insert_node_generic(&tegra->mm,
> > +                                    bo->mm, bo->gem.size, PAGE_SIZE, 0, 0);
> >     if (err < 0) {
> >             dev_err(tegra->drm->dev, "out of I/O virtual memory: %zd\n",
> >                     err);
> > diff --git a/drivers/gpu/drm/ttm/ttm_bo_manager.c b/drivers/gpu/drm/ttm/ttm_bo_manager.c
> > index aea6a01500e1..493fe6b33353 100644
> > --- a/drivers/gpu/drm/ttm/ttm_bo_manager.c
> > +++ b/drivers/gpu/drm/ttm/ttm_bo_manager.c
> > @@ -54,9 +54,8 @@ static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
> >  {
> >     struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
> >     struct drm_mm *mm = &rman->mm;
> > -   struct drm_mm_node *node = NULL;
> > -   enum drm_mm_search_flags sflags = DRM_MM_SEARCH_BEST;
> > -   enum drm_mm_allocator_flags aflags = DRM_MM_CREATE_DEFAULT;
> > +   struct drm_mm_node *node;
> > +   enum drm_mm_insert_mode mode;
> >     unsigned long lpfn;
> >     int ret;
> >  
> > @@ -68,16 +67,15 @@ static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
> >     if (!node)
> >             return -ENOMEM;
> >  
> > -   if (place->flags & TTM_PL_FLAG_TOPDOWN) {
> > -           sflags = DRM_MM_SEARCH_BELOW;
> > -           aflags = DRM_MM_CREATE_TOP;
> > -   }
> > +   mode = DRM_MM_INSERT_BEST;
> > +   if (place->flags & TTM_PL_FLAG_TOPDOWN)
> > +           mode = DRM_MM_INSERT_HIGH;
> >  
> >     spin_lock(&rman->lock);
> > -   ret = drm_mm_insert_node_in_range_generic(mm, node, mem->num_pages,
> > +   ret = drm_mm_insert_node_in_range(mm, node,
> > +                                     mem->num_pages,
> >                                       mem->page_alignment, 0,
> > -                                     place->fpfn, lpfn,
> > -                                     sflags, aflags);
> > +                                     place->fpfn, lpfn, mode);
> >     spin_unlock(&rman->lock);
> >  
> >     if (unlikely(ret)) {
> > diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c
> > index a0fd3e66bc4b..baacdf62c83e 100644
> > --- a/drivers/gpu/drm/vc4/vc4_crtc.c
> > +++ b/drivers/gpu/drm/vc4/vc4_crtc.c
> > @@ -589,7 +589,7 @@ static int vc4_crtc_atomic_check(struct drm_crtc *crtc,
> >  
> >     spin_lock_irqsave(&vc4->hvs->mm_lock, flags);
> >     ret = drm_mm_insert_node(&vc4->hvs->dlist_mm, &vc4_state->mm,
> > -                            dlist_count, 1, 0);
> > +                            dlist_count);
> >     spin_unlock_irqrestore(&vc4->hvs->mm_lock, flags);
> >     if (ret)
> >             return ret;
> > diff --git a/drivers/gpu/drm/vc4/vc4_hvs.c b/drivers/gpu/drm/vc4/vc4_hvs.c
> > index 6fbab1c82cb1..4aba0fa56289 100644
> > --- a/drivers/gpu/drm/vc4/vc4_hvs.c
> > +++ b/drivers/gpu/drm/vc4/vc4_hvs.c
> > @@ -141,8 +141,7 @@ static int vc4_hvs_upload_linear_kernel(struct vc4_hvs *hvs,
> >     int ret, i;
> >     u32 __iomem *dst_kernel;
> >  
> > -   ret = drm_mm_insert_node(&hvs->dlist_mm, space, VC4_KERNEL_DWORDS, 1,
> > -                            0);
> > +   ret = drm_mm_insert_node(&hvs->dlist_mm, space, VC4_KERNEL_DWORDS);
> >     if (ret) {
> >             DRM_ERROR("Failed to allocate space for filter kernel: %d\n",
> >                       ret);
> > diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c
> > index 110d1518f5d5..c1f06897136b 100644
> > --- a/drivers/gpu/drm/vc4/vc4_plane.c
> > +++ b/drivers/gpu/drm/vc4/vc4_plane.c
> > @@ -514,9 +514,9 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
> >     if (lbm_size) {
> >             if (!vc4_state->lbm.allocated) {
> >                     spin_lock_irqsave(&vc4->hvs->mm_lock, irqflags);
> > -                   ret = drm_mm_insert_node(&vc4->hvs->lbm_mm,
> > -                                            &vc4_state->lbm,
> > -                                            lbm_size, 32, 0);
> > +                   ret = drm_mm_insert_node_generic(&vc4->hvs->lbm_mm,
> > +                                                    &vc4_state->lbm,
> > +                                                    lbm_size, 32, 0, 0);
> >                     spin_unlock_irqrestore(&vc4->hvs->mm_lock, irqflags);
> >             } else {
> >                     WARN_ON_ONCE(lbm_size != vc4_state->lbm.size);
> > diff --git a/drivers/gpu/drm/via/via_mm.c b/drivers/gpu/drm/via/via_mm.c
> > index a04ef1c992d9..4217d66a5cc6 100644
> > --- a/drivers/gpu/drm/via/via_mm.c
> > +++ b/drivers/gpu/drm/via/via_mm.c
> > @@ -140,11 +140,11 @@ int via_mem_alloc(struct drm_device *dev, void *data,
> >     if (mem->type == VIA_MEM_AGP)
> >             retval = drm_mm_insert_node(&dev_priv->agp_mm,
> >                                         &item->mm_node,
> > -                                       tmpSize, 0, DRM_MM_SEARCH_DEFAULT);
> > +                                       tmpSize);
> >     else
> >             retval = drm_mm_insert_node(&dev_priv->vram_mm,
> >                                         &item->mm_node,
> > -                                       tmpSize, 0, DRM_MM_SEARCH_DEFAULT);
> > +                                       tmpSize);
> >     if (retval)
> >             goto fail_alloc;
> >  
> > diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
> > index aa04fb0159a7..77cb7c627e09 100644
> > --- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
> > +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
> > @@ -673,16 +673,10 @@ static bool vmw_cmdbuf_try_alloc(struct vmw_cmdbuf_man *man,
> >   
> >     memset(info->node, 0, sizeof(*info->node));
> >     spin_lock_bh(&man->lock);
> > -   ret = drm_mm_insert_node_generic(&man->mm, info->node, info->page_size,
> > -                                    0, 0,
> > -                                    DRM_MM_SEARCH_DEFAULT,
> > -                                    DRM_MM_CREATE_DEFAULT);
> > +   ret = drm_mm_insert_node(&man->mm, info->node, info->page_size);
> >     if (ret) {
> >             vmw_cmdbuf_man_process(man);
> > -           ret = drm_mm_insert_node_generic(&man->mm, info->node,
> > -                                            info->page_size, 0, 0,
> > -                                            DRM_MM_SEARCH_DEFAULT,
> > -                                            DRM_MM_CREATE_DEFAULT);
> > +           ret = drm_mm_insert_node(&man->mm, info->node, info->page_size);
> >     }
> >  
> >     spin_unlock_bh(&man->lock);
> > diff --git a/include/drm/drm_mm.h b/include/drm/drm_mm.h
> > index 92ec5759caae..6558cd08d7c9 100644
> > --- a/include/drm/drm_mm.h
> > +++ b/include/drm/drm_mm.h
> > @@ -55,32 +55,27 @@
> >  #define DRM_MM_BUG_ON(expr) BUILD_BUG_ON_INVALID(expr)
> >  #endif
> >  
> > -enum drm_mm_search_flags {
> > -   DRM_MM_SEARCH_DEFAULT =         0,
> > -   DRM_MM_SEARCH_BEST =            1 << 0,
> > -   DRM_MM_SEARCH_BELOW =           1 << 1,
> > +enum drm_mm_insert_mode {
> > +   DRM_MM_INSERT_BEST = 0,
> > +   DRM_MM_INSERT_LOW,
> > +   DRM_MM_INSERT_HIGH,
> > +   DRM_MM_INSERT_EVICT,
> >  };
> >  
> > -enum drm_mm_allocator_flags {
> > -   DRM_MM_CREATE_DEFAULT =         0,
> > -   DRM_MM_CREATE_TOP =             1 << 0,
> > -};
> > -
> > -#define DRM_MM_BOTTOMUP DRM_MM_SEARCH_DEFAULT, DRM_MM_CREATE_DEFAULT
> > -#define DRM_MM_TOPDOWN DRM_MM_SEARCH_BELOW, DRM_MM_CREATE_TOP
> > -
> >  struct drm_mm_node {
> > +   struct drm_mm *mm;
> >     struct list_head node_list;
> >     struct list_head hole_stack;
> >     struct rb_node rb;
> > -   unsigned hole_follows : 1;
> > -   unsigned allocated : 1;
> > -   bool scanned_block : 1;
> > -   unsigned long color;
> > +   struct rb_node rb_hole_size;
> > +   struct rb_node rb_hole_addr;
> >     u64 start;
> >     u64 size;
> >     u64 __subtree_last;
> > -   struct drm_mm *mm;
> > +   u64 hole_size;
> > +   unsigned long color;
> > +   bool allocated : 1;
> > +   bool scanned_block : 1;
> >  #ifdef CONFIG_DRM_DEBUG_MM
> >     depot_stack_handle_t stack;
> >  #endif
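
For what it's worth, my reading of the new enum above, since the modes
deserve spelling out somewhere (please correct me if I have the
semantics wrong):

	enum drm_mm_insert_mode {
		DRM_MM_INSERT_BEST = 0,	/* smallest hole that can fit the request */
		DRM_MM_INSERT_LOW,	/* hole closest to the bottom of the range */
		DRM_MM_INSERT_HIGH,	/* hole closest to the top, filled top-down */
		DRM_MM_INSERT_EVICT,	/* hole stack order, most recently freed first */
	};
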
> > @@ -94,6 +89,8 @@ struct drm_mm {
> >     struct drm_mm_node head_node;
> >     /* Keep an interval_tree for fast lookup of drm_mm_nodes by address. */
> >     struct rb_root interval_tree;
> > +   struct rb_root holes_size;
> > +   struct rb_root holes_addr;
> >  
> >     void (*color_adjust)(const struct drm_mm_node *node,
> >                          unsigned long color,
> > @@ -116,7 +113,7 @@ struct drm_mm_scan {
> >     u64 hit_end;
> >  
> >     unsigned long color;
> > -   unsigned int flags;
> > +   enum drm_mm_insert_mode mode;
> >  };
> >  
> >  /**
> > @@ -168,7 +165,7 @@ static inline bool drm_mm_initialized(const struct drm_mm *mm)
> >   */
> >  static inline bool drm_mm_hole_follows(const struct drm_mm_node *node)
> >  {
> > -   return node->hole_follows;
> > +   return node->hole_size;
> >  }
> >  
> >  static inline u64 __drm_mm_hole_node_start(const struct drm_mm_node *hole_node)
> > @@ -251,14 +248,6 @@ static inline u64 drm_mm_hole_node_end(const struct drm_mm_node *hole_node)
> >  #define drm_mm_for_each_node_safe(entry, next, mm) \
> >     list_for_each_entry_safe(entry, next, drm_mm_nodes(mm), node_list)
> >  
> > -#define __drm_mm_for_each_hole(entry, mm, hole_start, hole_end, backwards) \
> > -   for (entry = list_entry((backwards) ? (mm)->hole_stack.prev : (mm)->hole_stack.next, struct drm_mm_node, hole_stack); \
> > -        &entry->hole_stack != &(mm)->hole_stack ? \
> > -        hole_start = drm_mm_hole_node_start(entry), \
> > -        hole_end = drm_mm_hole_node_end(entry), \
> > -        1 : 0; \
> > -        entry = list_entry((backwards) ? entry->hole_stack.prev : entry->hole_stack.next, struct drm_mm_node, hole_stack))
> > -
> >  /**
> >   * drm_mm_for_each_hole - iterator to walk over all holes
> >   * @entry: drm_mm_node used internally to track progress
> > @@ -274,57 +263,27 @@ static inline u64 drm_mm_hole_node_end(const struct drm_mm_node *hole_node)
> >   * Implementation Note:
> >   * We need to inline list_for_each_entry in order to be able to set hole_start
> >   * and hole_end on each iteration while keeping the macro sane.
> > - *
> > - * The __drm_mm_for_each_hole version is similar, but with added support for
> > - * going backwards.
> >   */
> > -#define drm_mm_for_each_hole(entry, mm, hole_start, hole_end) \
> > -   __drm_mm_for_each_hole(entry, mm, hole_start, hole_end, 0)
> > +#define drm_mm_for_each_hole(pos, mm, hole_start, hole_end) \
> > +   for (pos = list_first_entry(&(mm)->hole_stack, \
> > +                               typeof(*pos), hole_stack); \
> > +        &pos->hole_stack != &(mm)->hole_stack ? \
> > +        hole_start = drm_mm_hole_node_start(pos), \
> > +        hole_end = hole_start + pos->hole_size : 0; \
> > +        pos = list_next_entry(pos, hole_stack))
> >  
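
The open-coded iterator looks correct, and deriving hole_end from
hole_size is neat. Quick caller sketch (untested) for reference:

	struct drm_mm_node *pos;
	u64 hole_start, hole_end;

	drm_mm_for_each_hole(pos, &mm, hole_start, hole_end)
		pr_info("hole [%llx, %llx)\n", hole_start, hole_end);
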
> >  /*
> >   * Basic range manager support (drm_mm.c)
> >   */
> >  int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node);
> > -int drm_mm_insert_node_in_range_generic(struct drm_mm *mm,
> > -                                   struct drm_mm_node *node,
> > -                                   u64 size,
> > -                                   u64 alignment,
> > -                                   unsigned long color,
> > -                                   u64 start,
> > -                                   u64 end,
> > -                                   enum drm_mm_search_flags sflags,
> > -                                   enum drm_mm_allocator_flags aflags);
> > -
> > -/**
> > - * drm_mm_insert_node_in_range - ranged search for space and insert @node
> > - * @mm: drm_mm to allocate from
> > - * @node: preallocate node to insert
> > - * @size: size of the allocation
> > - * @alignment: alignment of the allocation
> > - * @start: start of the allowed range for this node
> > - * @end: end of the allowed range for this node
> > - * @flags: flags to fine-tune the allocation
> > - *
> > - * This is a simplified version of drm_mm_insert_node_in_range_generic() with
> > - * @color set to 0.
> > - *
> > - * The preallocated node must be cleared to 0.
> > - *
> > - * Returns:
> > - * 0 on success, -ENOSPC if there's no suitable hole.
> > - */
> > -static inline int drm_mm_insert_node_in_range(struct drm_mm *mm,
> > -                                         struct drm_mm_node *node,
> > -                                         u64 size,
> > -                                         u64 alignment,
> > -                                         u64 start,
> > -                                         u64 end,
> > -                                         enum drm_mm_search_flags flags)
> > -{
> > -   return drm_mm_insert_node_in_range_generic(mm, node, size, alignment,
> > -                                              0, start, end, flags,
> > -                                              DRM_MM_CREATE_DEFAULT);
> > -}
> > +int drm_mm_insert_node_in_range(struct drm_mm *mm,
> > +                           struct drm_mm_node *node,
> > +                           u64 size,
> > +                           u64 alignment,
> > +                           unsigned long color,
> > +                           u64 start,
> > +                           u64 end,
> > +                           enum drm_mm_insert_mode mode);
> >  
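
Nice to see the _generic suffix dance go away and color taken
unconditionally. A ranged top-down insertion now reads like this
(untested sketch):

	err = drm_mm_insert_node_in_range(&mm, node, size, alignment,
					  0 /* color */, start, end,
					  DRM_MM_INSERT_HIGH);
	if (err == -ENOSPC)
		return err; /* no hole in [start, end) fits the request */
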
> >  /**
> >   * drm_mm_insert_node_generic - search for space and insert @node
> > @@ -333,8 +292,7 @@ static inline int drm_mm_insert_node_in_range(struct drm_mm *mm,
> >   * @size: size of the allocation
> >   * @alignment: alignment of the allocation
> >   * @color: opaque tag value to use for this node
> > - * @sflags: flags to fine-tune the allocation search
> > - * @aflags: flags to fine-tune the allocation behavior
> > + * @mode: fine-tune the allocation search and placement
> >   *
> >   * The preallocated node must be cleared to 0.
> >   *
> > @@ -345,13 +303,11 @@ static inline int
> >  drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node,
> >                        u64 size, u64 alignment,
> >                        unsigned long color,
> > -                      enum drm_mm_search_flags sflags,
> > -                      enum drm_mm_allocator_flags aflags)
> > +                      enum drm_mm_insert_mode mode)
> >  {
> > -   return drm_mm_insert_node_in_range_generic(mm, node,
> > -                                              size, alignment, 0,
> > -                                              0, U64_MAX,
> > -                                              sflags, aflags);
> > +   return drm_mm_insert_node_in_range(mm, node,
> > +                                      size, alignment, color,
> > +                                      0, U64_MAX, mode);
> >  }
> >  
> >  /**
> > @@ -359,8 +315,6 @@ drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node,
> >   * @mm: drm_mm to allocate from
> >   * @node: preallocate node to insert
> >   * @size: size of the allocation
> > - * @alignment: alignment of the allocation
> > - * @flags: flags to fine-tune the allocation
> >   *
> >   * This is a simplified version of drm_mm_insert_node_generic() with @color set
> >   * to 0.
> > @@ -372,13 +326,9 @@ drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node,
> >   */
> >  static inline int drm_mm_insert_node(struct drm_mm *mm,
> >                                  struct drm_mm_node *node,
> > -                                u64 size,
> > -                                u64 alignment,
> > -                                enum drm_mm_search_flags flags)
> > +                                u64 size)
> >  {
> > -   return drm_mm_insert_node_generic(mm, node,
> > -                                     size, alignment, 0,
> > -                                     flags, DRM_MM_CREATE_DEFAULT);
> > +   return drm_mm_insert_node_generic(mm, node, size, 0, 0, 0);
> >  }
> >  
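
And with alignment and the search flag gone, the simplest callers
collapse to just this, matching the via and vmwgfx conversions above:

	err = drm_mm_insert_node(&mm, node, size);
	if (err)
		return err; /* -ENOSPC when no hole is large enough */
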
> >  void drm_mm_remove_node(struct drm_mm_node *node);
> > @@ -425,7 +375,7 @@ void drm_mm_scan_init_with_range(struct drm_mm_scan *scan,
> >                              struct drm_mm *mm,
> >                              u64 size, u64 alignment, unsigned long color,
> >                              u64 start, u64 end,
> > -                            unsigned int flags);
> > +                            enum drm_mm_insert_mode mode);
> >  
> >  /**
> >   * drm_mm_scan_init - initialize lru scanning
> > @@ -434,7 +384,7 @@ void drm_mm_scan_init_with_range(struct drm_mm_scan *scan,
> >   * @size: size of the allocation
> >   * @alignment: alignment of the allocation
> >   * @color: opaque tag value to use for the allocation
> > - * @flags: flags to specify how the allocation will be performed afterwards
> > + * @mode: fine-tune the allocation search and placement
> >   *
> >   * This simply sets up the scanning routines with the parameters for the desired
> >   * hole.
> > @@ -448,12 +398,11 @@ static inline void drm_mm_scan_init(struct drm_mm_scan *scan,
> >                                 u64 size,
> >                                 u64 alignment,
> >                                 unsigned long color,
> > -                               unsigned int flags)
> > +                               enum drm_mm_insert_mode mode)
> >  {
> >     drm_mm_scan_init_with_range(scan, mm,
> >                                 size, alignment, color,
> > -                               0, U64_MAX,
> > -                               flags);
> > +                               0, U64_MAX, mode);
> >  }
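
To make the scan/mode coupling concrete, eviction with the new
parameter would look roughly like this (untested sketch; my_obj, lru
and evict_link stand in for the driver's own eviction bookkeeping):

	struct drm_mm_scan scan;
	struct my_obj *obj; /* hypothetical object embedding a drm_mm_node */

	drm_mm_scan_init(&scan, &mm, size, alignment, color,
			 DRM_MM_INSERT_EVICT);
	list_for_each_entry(obj, &lru, evict_link)
		if (drm_mm_scan_add_block(&scan, &obj->node))
			break; /* a suitable hole has been assembled */

	/* then walk back with drm_mm_scan_remove_block() to find which
	 * of the added nodes actually need evicting before inserting
	 * with the same mode. */
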
> >  
> >  bool drm_mm_scan_add_block(struct drm_mm_scan *scan,
> > -- 
> > 2.11.0
> > 
> > _______________________________________________
> > Intel-gfx mailing list
> > Intel-gfx at lists.freedesktop.org
> > https://lists.freedesktop.org/mailman/listinfo/intel-gfx
> 
> -- 
> Daniel Vetter
> Software Engineer, Intel Corporation
> http://blog.ffwll.ch

-- 
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch
