> On 8 Aug 2016, at 10:46 PM, Mark Kettenis <[email protected]> wrote:
>
>> Date: Mon, 8 Aug 2016 21:56:23 +1000
>> From: David Gwynne <[email protected]>
>>
>> the current tracking of free static map entries is done as hand
>> rolled list manipulations using pointers in an rb_entry. it's really
>> confusing to read.
>>
>> since it's simple list manipulation, this replaces the hand-rolled
>> code with an SLIST.
>>
>> ok?
>
> I like this. How does this change the way entries are recycled? The
> new code will use the last entry that was freed. Was this the case
> with the old code as well?
The RB version put free items on the head (uvm.kentry_free) and removed them
from there too. The SLIST ops are the same in that respect.
I think the order in which new entries from a fresh page are added in
uvm_mapent_alloc is different, but that is not the hot path by any means.
dlg
>
>
>> Index: uvm.h
>> ===================================================================
>> RCS file: /cvs/src/sys/uvm/uvm.h,v
>> retrieving revision 1.60
>> diff -u -p -r1.60 uvm.h
>> --- uvm.h 8 Oct 2015 15:58:38 -0000 1.60
>> +++ uvm.h 8 Aug 2016 11:53:57 -0000
>> @@ -69,7 +69,7 @@ struct uvm {
>> struct mutex aiodoned_lock;
>>
>> /* static kernel map entry pool */
>> - vm_map_entry_t kentry_free; /* free page pool */
>> + SLIST_HEAD(, vm_map_entry) kentry_free; /* free page pool */
>>
>> /* aio_done is locked by uvm.aiodoned_lock. */
>> TAILQ_HEAD(, buf) aio_done; /* done async i/o reqs */
>> Index: uvm_map.c
>> ===================================================================
>> RCS file: /cvs/src/sys/uvm/uvm_map.c,v
>> retrieving revision 1.219
>> diff -u -p -r1.219 uvm_map.c
>> --- uvm_map.c 30 Jul 2016 16:43:44 -0000 1.219
>> +++ uvm_map.c 8 Aug 2016 11:53:57 -0000
>> @@ -1669,25 +1669,23 @@ uvm_mapent_alloc(struct vm_map *map, int
>>
>> if (map->flags & VM_MAP_INTRSAFE || cold) {
>> mtx_enter(&uvm_kmapent_mtx);
>> - me = uvm.kentry_free;
>> - if (me == NULL) {
>> + if (SLIST_EMPTY(&uvm.kentry_free)) {
>> ne = km_alloc(PAGE_SIZE, &kv_page, &kp_dirty,
>> &kd_nowait);
>> if (ne == NULL)
>> panic("uvm_mapent_alloc: cannot allocate map "
>> "entry");
>> - for (i = 0;
>> - i < PAGE_SIZE / sizeof(struct vm_map_entry) - 1;
>> - i++)
>> - RB_LEFT(&ne[i], daddrs.addr_entry) = &ne[i + 1];
>> - RB_LEFT(&ne[i], daddrs.addr_entry) = NULL;
>> - me = ne;
>> + for (i = 0; i < PAGE_SIZE / sizeof(*ne); i++) {
>> + SLIST_INSERT_HEAD(&uvm.kentry_free,
>> + &ne[i], daddrs.addr_kentry);
>> + }
>> if (ratecheck(&uvm_kmapent_last_warn_time,
>> &uvm_kmapent_warn_rate))
>> printf("uvm_mapent_alloc: out of static "
>> "map entries\n");
>> }
>> - uvm.kentry_free = RB_LEFT(me, daddrs.addr_entry);
>> + me = SLIST_FIRST(&uvm.kentry_free);
>> + SLIST_REMOVE_HEAD(&uvm.kentry_free, daddrs.addr_kentry);
>> uvmexp.kmapent++;
>> mtx_leave(&uvm_kmapent_mtx);
>> me->flags = UVM_MAP_STATIC;
>> @@ -1725,8 +1723,7 @@ uvm_mapent_free(struct vm_map_entry *me)
>> {
>> if (me->flags & UVM_MAP_STATIC) {
>> mtx_enter(&uvm_kmapent_mtx);
>> - RB_LEFT(me, daddrs.addr_entry) = uvm.kentry_free;
>> - uvm.kentry_free = me;
>> + SLIST_INSERT_HEAD(&uvm.kentry_free, me, daddrs.addr_kentry);
>> uvmexp.kmapent--;
>> mtx_leave(&uvm_kmapent_mtx);
>> } else if (me->flags & UVM_MAP_KMEM) {
>> @@ -2795,11 +2792,10 @@ uvm_map_init(void)
>>
>> /* now set up static pool of kernel map entries ... */
>> mtx_init(&uvm_kmapent_mtx, IPL_VM);
>> - uvm.kentry_free = NULL;
>> + SLIST_INIT(&uvm.kentry_free);
>> for (lcv = 0 ; lcv < MAX_KMAPENT ; lcv++) {
>> - RB_LEFT(&kernel_map_entry[lcv], daddrs.addr_entry) =
>> - uvm.kentry_free;
>> - uvm.kentry_free = &kernel_map_entry[lcv];
>> + SLIST_INSERT_HEAD(&uvm.kentry_free,
>> + &kernel_map_entry[lcv], daddrs.addr_kentry);
>> }
>>
>> /* initialize the map-related pools. */
>> Index: uvm_map.h
>> ===================================================================
>> RCS file: /cvs/src/sys/uvm/uvm_map.h,v
>> retrieving revision 1.55
>> diff -u -p -r1.55 uvm_map.h
>> --- uvm_map.h 9 Sep 2015 23:33:37 -0000 1.55
>> +++ uvm_map.h 8 Aug 2016 11:53:57 -0000
>> @@ -161,6 +161,7 @@ union vm_map_object {
>> struct vm_map_entry {
>> union {
>> RB_ENTRY(vm_map_entry) addr_entry; /* address tree */
>> + SLIST_ENTRY(vm_map_entry) addr_kentry;
>> } daddrs;
>>
>> union {
>>
>>