On Sat, Mar 03, 2018 at 01:45:51PM +0000, Anatoly Burakov wrote:
> As we are preparing for dynamic memory allocation, we need to be
> able to handle holes in our malloc heap, hence we're switching to
> doubly linked list, and prepare infrastructure to support it.
> 
> Since our heap is now aware where are our first and last elements,
> there is no longer any need to have a dummy element at the end of
> each heap, so get rid of that as well. Instead, let insert/remove/
> join/split operations handle end-of-list conditions automatically.
> 
> Signed-off-by: Anatoly Burakov <anatoly.bura...@intel.com>
> ---
>  lib/librte_eal/common/include/rte_malloc_heap.h |   6 +
>  lib/librte_eal/common/malloc_elem.c             | 200 +++++++++++++++++++-----
>  lib/librte_eal/common/malloc_elem.h             |  14 +-
>  lib/librte_eal/common/malloc_heap.c             |   8 +-
>  4 files changed, 179 insertions(+), 49 deletions(-)
> 
> diff --git a/lib/librte_eal/common/include/rte_malloc_heap.h b/lib/librte_eal/common/include/rte_malloc_heap.h
> index ba99ed9..9ec4b62 100644
> --- a/lib/librte_eal/common/include/rte_malloc_heap.h
> +++ b/lib/librte_eal/common/include/rte_malloc_heap.h
> @@ -13,12 +13,18 @@
>  /* Number of free lists per heap, grouped by size. */
>  #define RTE_HEAP_NUM_FREELISTS  13
> 
> +/* dummy definition, for pointers */
> +struct malloc_elem;
> +
>  /**
>   * Structure to hold malloc heap
>   */
>  struct malloc_heap {
>  	rte_spinlock_t lock;
>  	LIST_HEAD(, malloc_elem) free_head[RTE_HEAP_NUM_FREELISTS];
> +	struct malloc_elem *first;
> +	struct malloc_elem *last;
> +
>  	unsigned alloc_count;
>  	size_t total_size;
>  } __rte_cache_aligned;
> diff --git a/lib/librte_eal/common/malloc_elem.c b/lib/librte_eal/common/malloc_elem.c
> index ea041e2..eb41200 100644
> --- a/lib/librte_eal/common/malloc_elem.c
> +++ b/lib/librte_eal/common/malloc_elem.c
> @@ -31,6 +31,7 @@ malloc_elem_init(struct malloc_elem *elem,
>  	elem->heap = heap;
>  	elem->ms = ms;
>  	elem->prev = NULL;
> +	elem->next = NULL;
>  	memset(&elem->free_list, 0, sizeof(elem->free_list));
>  	elem->state = ELEM_FREE;
>  	elem->size = size;
> @@ -39,15 +40,56 @@ malloc_elem_init(struct malloc_elem *elem,
>  	set_trailer(elem);
>  }
> 
> -/*
> - * Initialize a dummy malloc_elem header for the end-of-memseg marker
> - */
>  void
> -malloc_elem_mkend(struct malloc_elem *elem, struct malloc_elem *prev)
> +malloc_elem_insert(struct malloc_elem *elem)
>  {
> -	malloc_elem_init(elem, prev->heap, prev->ms, 0);
> -	elem->prev = prev;
> -	elem->state = ELEM_BUSY; /* mark busy so its never merged */
> +	struct malloc_elem *prev_elem, *next_elem;
> +	struct malloc_heap *heap = elem->heap;
> +
> +	if (heap->first == NULL && heap->last == NULL) {
> +		/* if empty heap */
> +		heap->first = elem;
> +		heap->last = elem;
> +		prev_elem = NULL;
> +		next_elem = NULL;
> +	} else if (elem < heap->first) {
> +		/* if lower than start */
> +		prev_elem = NULL;
> +		next_elem = heap->first;
> +		heap->first = elem;
> +	} else if (elem > heap->last) {
> +		/* if higher than end */
> +		prev_elem = heap->last;
> +		next_elem = NULL;
> +		heap->last = elem;
> +	} else {
> +		/* the new memory is somewhere inbetween start and end */
> +		uint64_t dist_from_start, dist_from_end;
> +
> +		dist_from_end = RTE_PTR_DIFF(heap->last, elem);
> +		dist_from_start = RTE_PTR_DIFF(elem, heap->first);
> +
> +		/* check which is closer, and find closest list entries */
> +		if (dist_from_start < dist_from_end) {
> +			prev_elem = heap->first;
> +			while (prev_elem->next < elem)
> +				prev_elem = prev_elem->next;
> +			next_elem = prev_elem->next;
> +		} else {
> +			next_elem = heap->last;
> +			while (next_elem->prev > elem)
> +				next_elem = next_elem->prev;
> +			prev_elem = next_elem->prev;
> +		}
> +	}
> +
> +	/* insert new element */
> +	elem->prev = prev_elem;
> +	elem->next = next_elem;
> +	if (prev_elem)
> +		prev_elem->next = elem;
> +	if (next_elem)
> +		next_elem->prev = elem;
> }
Would it be possible to use a TAILQ here? If so, it could be easier to read.
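
For illustration, a rough sketch of what the insert path might look like with a
TAILQ from <sys/queue.h>. The "elem_list" head, the "elem_list_head" struct name
and the "elem_next" linkage field are made-up names for this sketch, not anything
in the patch:

	#include <sys/queue.h>

	/* assumed changes, not part of this patch:
	 * struct malloc_elem would carry a linkage field:
	 *	TAILQ_ENTRY(malloc_elem) elem_next;
	 * and struct malloc_heap would replace first/last with:
	 *	TAILQ_HEAD(elem_list_head, malloc_elem) elem_list;
	 */
	void
	malloc_elem_insert(struct malloc_elem *elem)
	{
		struct malloc_heap *heap = elem->heap;
		struct malloc_elem *cur;

		/* keep the element list sorted by address: insert
		 * before the first element at a higher address */
		TAILQ_FOREACH(cur, &heap->elem_list, elem_next) {
			if (cur > elem) {
				TAILQ_INSERT_BEFORE(cur, elem, elem_next);
				return;
			}
		}
		/* empty list, or past the current tail */
		TAILQ_INSERT_TAIL(&heap->elem_list, elem, elem_next);
	}

The empty/head/tail special cases and the first/last bookkeeping disappear, since
TAILQ_FIRST()/TAILQ_LAST() and the insert macros handle end-of-list conditions.
The one thing this simple version gives up is the "walk from whichever end is
closer" heuristic; that could be kept by starting from
TAILQ_LAST(&heap->elem_list, elem_list_head) and stepping with TAILQ_PREV() when
the new element is nearer the tail.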