Hi,
With the recent introduction of vmmap, I introduced a slowdown which
affects programs with a lot of memory (browsers for instance). First of
all, since I've heard very few complaints, thanks for putting up with
this.
The reason for this e-mail is that I have a diff. This diff should
make the new vmmap as fast as the old vmmap for large programs. If you
were hit by the slowdown or would like to test, please use the diff
below and let me know if there are any problems.
--
Ariane
Index: uvm/uvm_addr.c
===================================================================
RCS file: /cvs/src/sys/uvm/uvm_addr.c,v
retrieving revision 1.2
diff -u -d -p -r1.2 uvm_addr.c
--- uvm/uvm_addr.c 15 Mar 2012 17:52:28 -0000 1.2
+++ uvm/uvm_addr.c 23 Mar 2012 20:36:49 -0000
@@ -571,7 +571,7 @@ uaddr_rnd_select(struct vm_map *map, str
struct vmspace *vm;
vaddr_t guard_sz;
vaddr_t low_addr, high_addr;
- struct vm_map_entry *entry;
+ struct vm_map_entry *entry, *next;
vsize_t before_gap, after_gap;
vaddr_t tmp;
@@ -601,40 +601,60 @@ uaddr_rnd_select(struct vm_map *map, str
before_gap = 0;
after_gap = guard_sz;
+ hint -= MIN(hint, before_gap);
/*
- * Find the first entry at or after hint with free space.
+ * Use the augmented address tree to look up the first entry
+ * at or after hint with sufficient space.
*
- * Since we need an entry that is on the free-list, search until
- * we hit an entry that is owned by our uaddr.
+ * This code is the original optimized code, but will fail if the
+ * subtree it looks at has sufficient space yet fails to meet
+ * the align constraint.
+ *
+ * Guard: subtree is not exhausted and max(fspace) >= required.
*/
- for (entry = uvm_map_entrybyaddr(&map->addr, hint);
- entry != NULL &&
- uvm_map_uaddr_e(map, entry) != uaddr;
- entry = RB_NEXT(uvm_map_addr, &map->addr, entry)) {
- /* Fail if we search past uaddr_maxaddr. */
- if (VMMAP_FREE_START(entry) >= uaddr->uaddr_maxaddr) {
- entry = NULL;
- break;
- }
- }
+ entry = uvm_map_entrybyaddr(&map->addr, hint);
- for ( /* initial entry filled in above */ ;
- entry != NULL && VMMAP_FREE_START(entry) < uaddr->uaddr_maxaddr;
- entry = TAILQ_NEXT(entry, dfree.tailq)) {
- if (uvm_addr_fitspace(&low_addr, &high_addr,
+ /* Walk up the tree, until there is at least sufficient space. */
+ while (entry != NULL &&
+ entry->fspace_augment < before_gap + after_gap + sz)
+ entry = RB_PARENT(entry, daddrs.addr_entry);
+
+ while (entry != NULL) {
+ /* Test if this fits. */
+ if (VMMAP_FREE_END(entry) > hint &&
+ uvm_addr_fitspace(&low_addr, &high_addr,
MAX(uaddr->uaddr_minaddr, VMMAP_FREE_START(entry)),
MIN(uaddr->uaddr_maxaddr, VMMAP_FREE_END(entry)),
sz, align, offset, before_gap, after_gap) == 0) {
*entry_out = entry;
- if (hint >= low_addr && hint <= high_addr)
- *addr_out = hint;
- else
- *addr_out = low_addr;
+ *addr_out = low_addr;
return 0;
}
+
+ /* RB_NEXT, but skip subtrees that cannot possibly fit. */
+ next = RB_RIGHT(entry, daddrs.addr_entry);
+ if (next != NULL &&
+ next->fspace_augment >= before_gap + after_gap + sz) {
+ entry = next;
+ while ((next = RB_LEFT(entry, daddrs.addr_entry)) !=
+ NULL)
+ entry = next;
+ } else {
+do_parent:
+ next = RB_PARENT(entry, daddrs.addr_entry);
+ if (next == NULL)
+ entry = NULL;
+ else if (RB_LEFT(next, daddrs.addr_entry) == entry)
+ entry = next;
+ else {
+ entry = next;
+ goto do_parent;
+ }
+ }
}
+ /* Lookup failed. */
return ENOMEM;
}
@@ -654,34 +674,7 @@ void
uaddr_rnd_insert(struct vm_map *map, struct uvm_addr_state *uaddr_p,
struct vm_map_entry *entry)
{
- struct uaddr_rnd_state *uaddr;
- struct vm_map_entry *prev;
-
- uaddr = (struct uaddr_rnd_state*)uaddr_p;
- KASSERT(entry == RB_FIND(uvm_map_addr, &map->addr, entry));
-
- /*
- * Make prev the first vm_map_entry before entry.
- */
- for (prev = RB_PREV(uvm_map_addr, &map->addr, entry);
- prev != NULL;
- prev = RB_PREV(uvm_map_addr, &map->addr, prev)) {
- /* Stop and fail when reaching uaddr minaddr. */
- if (VMMAP_FREE_START(prev) < uaddr_p->uaddr_minaddr) {
- prev = NULL;
- break;
- }
-
- KASSERT(prev->etype & UVM_ET_FREEMAPPED);
- if (uvm_map_uaddr_e(map, prev) == uaddr_p)
- break;
- }
-
- /* Perform insertion. */
- if (prev == NULL)
- TAILQ_INSERT_HEAD(&uaddr->ur_free, entry, dfree.tailq);
- else
- TAILQ_INSERT_AFTER(&uaddr->ur_free, prev, entry, dfree.tailq);
+ return;
}
/*
@@ -691,10 +684,7 @@ void
uaddr_rnd_remove(struct vm_map *map, struct uvm_addr_state *uaddr_p,
struct vm_map_entry *entry)
{
- struct uaddr_rnd_state *uaddr;
-
- uaddr = (struct uaddr_rnd_state*)uaddr_p;
- TAILQ_REMOVE(&uaddr->ur_free, entry, dfree.tailq);
+ return;
}
#if defined(DEBUG) || defined(DDB)
Index: uvm/uvm_map.c
===================================================================
RCS file: /cvs/src/sys/uvm/uvm_map.c,v
retrieving revision 1.150
diff -u -d -p -r1.150 uvm_map.c
--- uvm/uvm_map.c 15 Mar 2012 22:22:28 -0000 1.150
+++ uvm/uvm_map.c 23 Mar 2012 17:10:37 -0000
@@ -157,6 +157,8 @@ int uvm_map_findspace(struct vm_map*,
struct vm_map_entry**, struct vm_map_entry**,
vaddr_t*, vsize_t, vaddr_t, vaddr_t, vm_prot_t,
vaddr_t);
+vsize_t uvm_map_addr_augment_get(struct vm_map_entry*);
+void uvm_map_addr_augment(struct vm_map_entry*);
/*
* Tree management functions.
@@ -405,11 +407,16 @@ uvm_mapent_free_insert(struct vm_map *ma
UVM_MAP_REQ_WRITE(map);
/* Actual insert: forward to uaddr pointer. */
- fun = uaddr->uaddr_functions;
- KDASSERT(fun != NULL);
- if (fun->uaddr_free_insert != NULL)
- (*fun->uaddr_free_insert)(map, uaddr, entry);
- entry->etype |= UVM_ET_FREEMAPPED;
+ if (uaddr != NULL) {
+ fun = uaddr->uaddr_functions;
+ KDASSERT(fun != NULL);
+ if (fun->uaddr_free_insert != NULL)
+ (*fun->uaddr_free_insert)(map, uaddr, entry);
+ entry->etype |= UVM_ET_FREEMAPPED;
+ }
+
+ /* Update fspace augmentation. */
+ uvm_map_addr_augment(entry);
}
/*
@@ -421,14 +428,16 @@ uvm_mapent_free_remove(struct vm_map *ma
{
const struct uvm_addr_functions *fun;
- KASSERT((entry->etype & UVM_ET_FREEMAPPED) != 0);
+ KASSERT((entry->etype & UVM_ET_FREEMAPPED) != 0 || uaddr == NULL);
KASSERT(uvm_map_uaddr_e(map, entry) == uaddr);
UVM_MAP_REQ_WRITE(map);
- fun = uaddr->uaddr_functions;
- if (fun->uaddr_free_remove != NULL)
- (*fun->uaddr_free_remove)(map, uaddr, entry);
- entry->etype &= ~UVM_ET_FREEMAPPED;
+ if (uaddr != NULL) {
+ fun = uaddr->uaddr_functions;
+ if (fun->uaddr_free_remove != NULL)
+ (*fun->uaddr_free_remove)(map, uaddr, entry);
+ entry->etype &= ~UVM_ET_FREEMAPPED;
+ }
}
/*
@@ -889,6 +898,46 @@ uvm_map_findspace(struct vm_map *map, st
return ENOMEM;
}
+/* Calculate entry augmentation value. */
+vsize_t
+uvm_map_addr_augment_get(struct vm_map_entry *entry)
+{
+ vsize_t augment;
+ struct vm_map_entry *left, *right;
+
+ augment = entry->fspace;
+ if ((left = RB_LEFT(entry, daddrs.addr_entry)) != NULL)
+ augment = MAX(augment, left->fspace_augment);
+ if ((right = RB_RIGHT(entry, daddrs.addr_entry)) != NULL)
+ augment = MAX(augment, right->fspace_augment);
+ return augment;
+}
+
+/*
+ * Update augmentation data in entry.
+ */
+void
+uvm_map_addr_augment(struct vm_map_entry *entry)
+{
+ vsize_t augment;
+
+ while (entry != NULL) {
+ /* Calculate value for augmentation. */
+ augment = uvm_map_addr_augment_get(entry);
+
+ /*
+ * Descend update.
+ * Once we find an entry that already has the correct value,
+ * stop, since it means all its parents will use the correct
+ * value too.
+ */
+ if (entry->fspace_augment == augment)
+ return;
+ entry->fspace_augment = augment;
+ entry = RB_PARENT(entry, daddrs.addr_entry);
+ }
+}
+
/*
* uvm_map: establish a valid mapping in map
*
@@ -1254,18 +1303,15 @@ uvm_mapent_merge(struct vm_map *map, str
*/
free = uvm_map_uaddr_e(map, e1);
- if (free)
- uvm_mapent_free_remove(map, free, e1);
+ uvm_mapent_free_remove(map, free, e1);
free = uvm_map_uaddr_e(map, e2);
- if (free)
- uvm_mapent_free_remove(map, free, e2);
+ uvm_mapent_free_remove(map, free, e2);
uvm_mapent_addr_remove(map, e2);
e1->end = e2->end;
e1->guard = e2->guard;
e1->fspace = e2->fspace;
- if (free)
- uvm_mapent_free_insert(map, free, e1);
+ uvm_mapent_free_insert(map, free, e1);
DEAD_ENTRY_PUSH(dead, e2);
return e1;
@@ -1402,8 +1448,7 @@ uvm_map_mkentry(struct vm_map *map, stru
* Reset free space in first.
*/
free = uvm_map_uaddr_e(map, first);
- if (free)
- uvm_mapent_free_remove(map, free, first);
+ uvm_mapent_free_remove(map, free, first);
first->guard = 0;
first->fspace = 0;
@@ -1416,8 +1461,7 @@ uvm_map_mkentry(struct vm_map *map, stru
KDASSERT(last->start == last->end);
free = uvm_map_uaddr_e(map, last);
- if (free)
- uvm_mapent_free_remove(map, free, last);
+ uvm_mapent_free_remove(map, free, last);
uvm_mapent_addr_remove(map, last);
DEAD_ENTRY_PUSH(dead, last);
}
@@ -1639,16 +1683,14 @@ uvm_mapent_mkfree(struct vm_map *map, st
addr = entry->start;
end = VMMAP_FREE_END(entry);
free = uvm_map_uaddr_e(map, entry);
- if (free)
- uvm_mapent_free_remove(map, free, entry);
+ uvm_mapent_free_remove(map, free, entry);
uvm_mapent_addr_remove(map, entry);
DEAD_ENTRY_PUSH(dead, entry);
if (markfree) {
if (prev) {
free = uvm_map_uaddr_e(map, prev);
- if (free)
- uvm_mapent_free_remove(map, free, prev);
+ uvm_mapent_free_remove(map, free, prev);
}
*prev_ptr = uvm_map_fix_space(map, prev, addr, end, 0);
}
@@ -2372,8 +2414,7 @@ uvm_map_splitentry(struct vm_map *map, s
* Free space will change, unlink from free space tree.
*/
free = uvm_map_uaddr_e(map, orig);
- if (free)
- uvm_mapent_free_remove(map, free, orig);
+ uvm_mapent_free_remove(map, free, orig);
adj = split - orig->start;
@@ -2421,11 +2462,9 @@ uvm_map_splitentry(struct vm_map *map, s
free_before = free;
else
free_before = uvm_map_uaddr_e(map, orig);
- if (free_before)
- uvm_mapent_free_insert(map, free_before, orig);
+ uvm_mapent_free_insert(map, free_before, orig);
uvm_mapent_addr_insert(map, next);
- if (free)
- uvm_mapent_free_insert(map, free, next);
+ uvm_mapent_free_insert(map, free, next);
uvm_tree_sanity(map, __FILE__, __LINE__);
}
@@ -2709,6 +2748,7 @@ uvm_map_printit(struct vm_map *map, bool
in_free ? 'T' : 'F',
entry->guard,
VMMAP_FREE_START(entry), VMMAP_FREE_END(entry));
+ (*pr)("\tfspace_augment=%lu\n", entry->fspace_augment);
(*pr)("\tfreemapped=%c, uaddr=%p\n",
(entry->etype & UVM_ET_FREEMAPPED) ? 'T' : 'F', free);
if (free) {
@@ -4232,8 +4272,7 @@ uvm_map_clip_start(struct vm_map *map, s
/* Unlink original. */
free = uvm_map_uaddr_e(map, entry);
- if (free)
- uvm_mapent_free_remove(map, free, entry);
+ uvm_mapent_free_remove(map, free, entry);
uvm_mapent_addr_remove(map, entry);
/* Copy entry. */
@@ -4243,8 +4282,7 @@ uvm_map_clip_start(struct vm_map *map, s
/* Put new entry in place of original entry. */
uvm_mapent_addr_insert(map, tmp);
- if (free)
- uvm_mapent_free_insert(map, free, tmp);
+ uvm_mapent_free_insert(map, free, tmp);
/* Invoke splitentry. */
uvm_map_splitentry(map, tmp, entry, addr);
@@ -4502,8 +4540,7 @@ uvm_map_freelist_update_clear(struct vm_
next = RB_NEXT(uvm_map_addr, &map->addr, entry);
free = uvm_map_uaddr_e(map, entry);
- if (free)
- uvm_mapent_free_remove(map, free, entry);
+ uvm_mapent_free_remove(map, free, entry);
if (prev != NULL && entry->start == entry->end) {
prev->fspace += VMMAP_FREE_END(entry) - entry->end;
@@ -4664,7 +4701,7 @@ uvm_map_fix_space(struct vm_map *map, st
* We'll start a new entry and add to that entry
* instead.
*/
- if (entry != NULL && entfree != NULL)
+ if (entry != NULL)
uvm_mapent_free_insert(map, entfree, entry);
/* New entry for new uaddr. */
@@ -4690,7 +4727,7 @@ uvm_map_fix_space(struct vm_map *map, st
min = lmax;
}
/* Finally put entry on the uaddr state. */
- if (entry != NULL && entfree != NULL)
+ if (entry != NULL)
uvm_mapent_free_insert(map, entfree, entry);
return entry;
@@ -4983,8 +5020,11 @@ vm_map_unbusy_ln(struct vm_map *map, cha
}
+#undef RB_AUGMENT
+#define RB_AUGMENT(x) uvm_map_addr_augment((x))
RB_GENERATE(uvm_map_addr, vm_map_entry, daddrs.addr_entry,
uvm_mapentry_addrcmp);
+#undef RB_AUGMENT
/*
Index: uvm/uvm_map.h
===================================================================
RCS file: /cvs/src/sys/uvm/uvm_map.h,v
retrieving revision 1.47
diff -u -d -p -r1.47 uvm_map.h
--- uvm/uvm_map.h 9 Mar 2012 13:01:29 -0000 1.47
+++ uvm/uvm_map.h 22 Mar 2012 14:12:59 -0000
@@ -197,6 +197,8 @@ struct vm_map_entry {
#define UVM_MAP_STATIC 0x01 /* static map entry */
#define UVM_MAP_KMEM 0x02 /* from kmem entry pool */
+
+ vsize_t fspace_augment; /* max(fspace) in subtree */
};
#define VM_MAPENT_ISWIRED(entry) ((entry)->wired_count != 0)