+
if (!block->parent)
continue;
@@ -206,10 +245,14 @@ static int __force_merge(struct drm_buddy *mm,
* block in the next iteration as we would free the
* buddy block as part of the free function.
*/
- if (prev == buddy)
- prev = list_prev_entry(prev, link);
+ if (prev_block && prev_block == buddy) {
+ if (prev_block != first_block)
+ prev_block = rb_entry(rb_prev(&prev_block->rb),
+ struct drm_buddy_block,
+ rb);
+ }
- list_del(&block->link);
+ rbtree_remove(mm, block);
if (drm_buddy_block_is_clear(block))
mm->clear_avail -= drm_buddy_block_size(mm, block);
@@ -258,14 +301,14 @@ int drm_buddy_init(struct drm_buddy *mm, u64 size, u64 chunk_size)
BUG_ON(mm->max_order > DRM_BUDDY_MAX_ORDER);
- mm->free_list = kmalloc_array(mm->max_order + 1,
- sizeof(struct list_head),
+ mm->free_tree = kmalloc_array(mm->max_order + 1,
+ sizeof(struct rb_root),
GFP_KERNEL);
- if (!mm->free_list)
+ if (!mm->free_tree)
return -ENOMEM;
for (i = 0; i <= mm->max_order; ++i)
- INIT_LIST_HEAD(&mm->free_list[i]);
+ mm->free_tree[i] = RB_ROOT;
mm->n_roots = hweight64(size);
@@ -273,7 +316,7 @@ int drm_buddy_init(struct drm_buddy *mm, u64 size, u64 chunk_size)
sizeof(struct drm_buddy_block *),
GFP_KERNEL);
if (!mm->roots)
- goto out_free_list;
+ goto out_free_tree;
offset = 0;
i = 0;
@@ -312,8 +355,8 @@ int drm_buddy_init(struct drm_buddy *mm, u64 size, u64 chunk_size)
while (i--)
drm_block_free(mm, mm->roots[i]);
kfree(mm->roots);
-out_free_list:
- kfree(mm->free_list);
+out_free_tree:
+ kfree(mm->free_tree);
return -ENOMEM;
}
EXPORT_SYMBOL(drm_buddy_init);
@@ -323,7 +366,7 @@ EXPORT_SYMBOL(drm_buddy_init);
*
* @mm: DRM buddy manager to free
*
- * Cleanup memory manager resources and the freelist
+ * Cleanup memory manager resources and the freetree
*/
void drm_buddy_fini(struct drm_buddy *mm)
{
@@ -350,7 +393,7 @@ void drm_buddy_fini(struct drm_buddy *mm)
WARN_ON(mm->avail != mm->size);
kfree(mm->roots);
- kfree(mm->free_list);
+ kfree(mm->free_tree);
}
EXPORT_SYMBOL(drm_buddy_fini);
@@ -383,7 +426,7 @@ static int split_block(struct drm_buddy *mm,
clear_reset(block);
}
- mark_split(block);
+ mark_split(mm, block);
return 0;
}
@@ -598,7 +641,7 @@ get_maxblock(struct drm_buddy *mm, unsigned int order,
for (i = order; i <= mm->max_order; ++i) {
struct drm_buddy_block *tmp_block;
- list_for_each_entry_reverse(tmp_block, &mm->free_list[i], link) {
+ for_each_rb_entry_reverse(tmp_block, &mm->free_tree[i], rb) {
if (block_incompatible(tmp_block, flags))
continue;
@@ -624,7 +667,7 @@ get_maxblock(struct drm_buddy *mm, unsigned int order,
}
static struct drm_buddy_block *
-alloc_from_freelist(struct drm_buddy *mm,
+alloc_from_freetree(struct drm_buddy *mm,
unsigned int order,
unsigned long flags)
{
@@ -641,7 +684,7 @@ alloc_from_freelist(struct drm_buddy *mm,
for (tmp = order; tmp <= mm->max_order; ++tmp) {
struct drm_buddy_block *tmp_block;
- list_for_each_entry_reverse(tmp_block, &mm->free_list[tmp], link) {
+ for_each_rb_entry_reverse(tmp_block, &mm->free_tree[tmp], rb) {
if (block_incompatible(tmp_block, flags))
continue;
@@ -657,10 +700,8 @@ alloc_from_freelist(struct drm_buddy *mm,
if (!block) {
/* Fallback method */
for (tmp = order; tmp <= mm->max_order; ++tmp) {
- if (!list_empty(&mm->free_list[tmp])) {
- block = list_last_entry(&mm->free_list[tmp],
- struct drm_buddy_block,
- link);
+ if (!rbtree_is_empty(mm, tmp)) {
+ block = rbtree_last_entry(mm, tmp);
if (block)
break;
}
@@ -728,7 +769,7 @@ static int __alloc_range(struct drm_buddy *mm,
if (contains(start, end, block_start, block_end)) {
if (drm_buddy_block_is_free(block)) {
- mark_allocated(block);
+ mark_allocated(mm, block);
total_allocated += drm_buddy_block_size(mm, block);
mm->avail -= drm_buddy_block_size(mm, block);
if (drm_buddy_block_is_clear(block))
@@ -806,7 +847,6 @@ static int __alloc_contig_try_harder(struct drm_buddy *mm,
{
u64 rhs_offset, lhs_offset, lhs_size, filled;
struct drm_buddy_block *block;
- struct list_head *list;
LIST_HEAD(blocks_lhs);
unsigned long pages;
unsigned int order;
@@ -819,11 +859,10 @@ static int __alloc_contig_try_harder(struct drm_buddy *mm,
if (order == 0)
return -ENOSPC;
- list = &mm->free_list[order];
- if (list_empty(list))
+ if (rbtree_is_empty(mm, order))
return -ENOSPC;
- list_for_each_entry_reverse(block, list, link) {
+ for_each_rb_entry_reverse(block, &mm->free_tree[order], rb) {
/* Allocate blocks traversing RHS */
rhs_offset = drm_buddy_block_offset(block);
err = __drm_buddy_alloc_range(mm, rhs_offset, size,
@@ -933,7 +972,7 @@ int drm_buddy_block_trim(struct drm_buddy *mm,
list_add(&block->tmp_link, &dfs);
err = __alloc_range(mm, &dfs, new_start, new_size, blocks, NULL);
if (err) {
- mark_allocated(block);
+ mark_allocated(mm, block);
mm->avail -= drm_buddy_block_size(mm, block);
if (drm_buddy_block_is_clear(block))
mm->clear_avail -= drm_buddy_block_size(mm, block);
@@ -956,8 +995,8 @@ __drm_buddy_alloc_blocks(struct drm_buddy *mm,
return __drm_buddy_alloc_range_bias(mm, start, end,
order, flags);
else
- /* Allocate from freelist */
- return alloc_from_freelist(mm, order, flags);
+ /* Allocate from freetree */
+ return alloc_from_freetree(mm, order, flags);
}
/**
@@ -974,8 +1013,8 @@ __drm_buddy_alloc_blocks(struct drm_buddy *mm,
* alloc_range_bias() called on range limitations, which traverses
* the tree and returns the desired block.
*
- * alloc_from_freelist() called when *no* range restrictions
- * are enforced, which picks the block from the freelist.
+ * alloc_from_freetree() called when *no* range restrictions
+ * are enforced, which picks the block from the freetree.
*
* Returns:
* 0 on success, error code on failure.
@@ -1077,7 +1116,7 @@ int drm_buddy_alloc_blocks(struct drm_buddy *mm,
}
} while (1);
- mark_allocated(block);
+ mark_allocated(mm, block);
mm->avail -= drm_buddy_block_size(mm, block);
if (drm_buddy_block_is_clear(block))
mm->clear_avail -= drm_buddy_block_size(mm, block);
@@ -1161,7 +1200,7 @@ void drm_buddy_print(struct drm_buddy *mm, struct drm_printer *p)
struct drm_buddy_block *block;
u64 count = 0, free;
- list_for_each_entry(block, &mm->free_list[order], link) {
+ for_each_rb_entry(block, &mm->free_tree[order], rb) {
BUG_ON(!drm_buddy_block_is_free(block));
count++;
}
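
The hunks above call free-tree helpers (rbtree_remove(), rbtree_is_empty(), rbtree_last_entry()) whose definitions are introduced earlier in the patch and are not visible in this excerpt. A minimal sketch of what such helpers could look like, assuming one rb_root per order with free blocks linked through the new rb node; the actual patch may order the tree and implement them differently:

/*
 * Illustrative sketch only, not taken verbatim from the patch.
 * Assumes <linux/rbtree.h> and the drm_buddy structures as modified here.
 */
static void rbtree_remove(struct drm_buddy *mm, struct drm_buddy_block *block)
{
        unsigned int order = drm_buddy_block_order(block);

        /* Detach the block from its per-order free tree */
        rb_erase(&block->rb, &mm->free_tree[order]);
        RB_CLEAR_NODE(&block->rb);
}

static bool rbtree_is_empty(struct drm_buddy *mm, unsigned int order)
{
        return RB_EMPTY_ROOT(&mm->free_tree[order]);
}

static struct drm_buddy_block *rbtree_last_entry(struct drm_buddy *mm,
                                                 unsigned int order)
{
        /* Last entry of the in-order traversal, or NULL if the tree is empty */
        struct rb_node *node = rb_last(&mm->free_tree[order]);

        return node ? rb_entry(node, struct drm_buddy_block, rb) : NULL;
}
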
diff --git a/include/drm/drm_buddy.h b/include/drm/drm_buddy.h
index 9689a7c5dd36..a64d108a33b7 100644
--- a/include/drm/drm_buddy.h
+++ b/include/drm/drm_buddy.h
@@ -10,6 +10,7 @@
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/sched.h>
+#include <linux/rbtree.h>
#include <drm/drm_print.h>
@@ -22,6 +23,41 @@
start__ >= max__ || size__ > max__ - start__; \
})
+/*
+ * for_each_rb_entry() - iterate over an RB tree in order
+ * @pos: the struct type * to use as a loop cursor
+ * @root: pointer to struct rb_root to iterate
+ * @member: name of the rb_node field within the struct
+ */
+#define for_each_rb_entry(pos, root, member) \
+ for (pos = rb_entry_safe(rb_first(root), typeof(*pos), member); \
+ pos; \
+ pos = rb_entry_safe(rb_next(&(pos)->member), typeof(*pos), member))
+
+/*
+ * for_each_rb_entry_reverse() - iterate over an RB tree in reverse order
+ * @pos: the struct type * to use as a loop cursor
+ * @root: pointer to struct rb_root to iterate
+ * @member: name of the rb_node field within the struct
+ */
+#define for_each_rb_entry_reverse(pos, root, member) \
+ for (pos = rb_entry_safe(rb_last(root), typeof(*pos), member); \
+ pos; \
+ pos = rb_entry_safe(rb_prev(&(pos)->member), typeof(*pos), member))
+
+/**
+ * for_each_rb_entry_reverse_safe() - safely iterate over an RB tree in reverse order
+ * @pos: the struct type * to use as a loop cursor.
+ * @n: another struct type * to use as temporary storage.
+ * @root: pointer to struct rb_root to iterate.
+ * @member: name of the rb_node field within the struct.
+ */
+#define for_each_rb_entry_reverse_safe(pos, n, root, member) \