On Tue, Jan 25, 2022 at 07:54:52AM +0000, Klemens Nanni wrote:
> > Could we use a vm_map_assert_locked() or something similar that reflect
> > the exclusive or shared (read) lock comments? I don't trust comments.
> > It's too easy to miss a lock in a code path.
>
> This survives desktop usage, running regress and building kernels on
> amd64.
>
> I'll throw it at sparc64 soon.
>
> >
> > > So annotate functions using `size' wrt. the required lock.
Here's a better diff that asserts for read, write or any type of vm_map
lock.
vm_map_lock() and vm_map_lock_read() are used for exclusive and shared
access respectively; unless I missed something, no code path is purely
protected by vm_map_lock_read() alone, i.e. functions called with a read
lock held by the caller are also called with a write lock elsewhere.
That means my new vm_map_assert_locked_read() is currently unused, but
vm_map_assert_locked_any() and vm_map_assert_locked_write() are now used
to validate existing function comments as well as a few new places.
amd64 and sparc64 are happy with this, both daily drivers as well as
build/regress machines.
I'd appreciate more tests and reports about running with this diff and
mmap(2) unlocked.
Index: uvm_map.c
===================================================================
RCS file: /cvs/src/sys/uvm/uvm_map.c,v
retrieving revision 1.282
diff -u -p -r1.282 uvm_map.c
--- uvm_map.c 21 Dec 2021 22:21:32 -0000 1.282
+++ uvm_map.c 27 Jan 2022 10:45:47 -0000
@@ -805,6 +805,8 @@ uvm_map_sel_limits(vaddr_t *min, vaddr_t
* Fills in *start_ptr and *end_ptr to be the first and last entry describing
* the space.
* If called with prefilled *start_ptr and *end_ptr, they are to be correct.
+ *
+ * map must be at least read-locked.
*/
int
uvm_map_isavail(struct vm_map *map, struct uvm_addr_state *uaddr,
@@ -815,6 +817,8 @@ uvm_map_isavail(struct vm_map *map, stru
struct uvm_map_addr *atree;
struct vm_map_entry *i, *i_end;
+ vm_map_assert_locked_any(map);
+
if (addr + sz < addr)
return 0;
@@ -1821,6 +1825,8 @@ boolean_t
uvm_map_lookup_entry(struct vm_map *map, vaddr_t address,
struct vm_map_entry **entry)
{
+ vm_map_assert_locked_any(map);
+
*entry = uvm_map_entrybyaddr(&map->addr, address);
return *entry != NULL && !UVM_ET_ISHOLE(*entry) &&
(*entry)->start <= address && (*entry)->end > address;
@@ -2206,6 +2212,8 @@ uvm_unmap_kill_entry(struct vm_map *map,
* If remove_holes, then remove ET_HOLE entries as well.
* If markfree, entry will be properly marked free, otherwise, no replacement
* entry will be put in the tree (corrupting the tree).
+ *
+ * map must be locked.
*/
void
uvm_unmap_remove(struct vm_map *map, vaddr_t start, vaddr_t end,
@@ -2214,6 +2222,8 @@ uvm_unmap_remove(struct vm_map *map, vad
{
struct vm_map_entry *prev_hint, *next, *entry;
+ vm_map_assert_locked_write(map);
+
start = MAX(start, map->min_offset);
end = MIN(end, map->max_offset);
if (start >= end)
@@ -2976,12 +2986,17 @@ uvm_tree_sanity(struct vm_map *map, char
UVM_ASSERT(map, addr == vm_map_max(map), file, line);
}
+/*
+ * map must be at least read-locked.
+ */
void
uvm_tree_size_chk(struct vm_map *map, char *file, int line)
{
struct vm_map_entry *iter;
vsize_t size;
+ vm_map_assert_locked_any(map);
+
size = 0;
RBT_FOREACH(iter, uvm_map_addr, &map->addr) {
if (!UVM_ET_ISHOLE(iter))
@@ -4268,7 +4283,7 @@ uvm_map_submap(struct vm_map *map, vaddr
* uvm_map_checkprot: check protection in map
*
* => must allow specific protection in a fully allocated region.
- * => map mut be read or write locked by caller.
+ * => map must be read or write locked by caller.
*/
boolean_t
uvm_map_checkprot(struct vm_map *map, vaddr_t start, vaddr_t end,
@@ -4276,6 +4291,8 @@ uvm_map_checkprot(struct vm_map *map, va
{
struct vm_map_entry *entry;
+ vm_map_assert_locked_any(map);
+
if (start < map->min_offset || end > map->max_offset || start > end)
return FALSE;
if (start == end)
@@ -5557,6 +5577,46 @@ vm_map_unbusy_ln(struct vm_map *map, cha
mtx_leave(&map->flags_lock);
if (oflags & VM_MAP_WANTLOCK)
wakeup(&map->flags);
+}
+
+void
+vm_map_assert_locked_any_ln(struct vm_map *map, char *file, int line)
+{
+ LPRINTF(("map assert read or write locked: %p (at %s %d)\n", map, file,
line));
+ if ((map->flags & VM_MAP_INTRSAFE) == 0)
+ rw_assert_anylock(&map->lock);
+ else
+ MUTEX_ASSERT_LOCKED(&map->mtx);
+}
+
+void
+vm_map_assert_locked_read_ln(struct vm_map *map, char *file, int line)
+{
+ LPRINTF(("map assert read locked: %p (at %s %d)\n", map, file, line));
+ if ((map->flags & VM_MAP_INTRSAFE) == 0)
+ rw_assert_rdlock(&map->lock);
+ else
+ MUTEX_ASSERT_LOCKED(&map->mtx);
+}
+
+void
+vm_map_assert_locked_write_ln(struct vm_map *map, char *file, int line)
+{
+ LPRINTF(("map assert write locked: %p (at %s %d)\n", map, file, line));
+ if ((map->flags & VM_MAP_INTRSAFE) == 0)
+ rw_assert_wrlock(&map->lock);
+ else
+ MUTEX_ASSERT_LOCKED(&map->mtx);
+}
+
+void
+vm_map_assert_unlocked_ln(struct vm_map *map, char *file, int line)
+{
+ LPRINTF(("map assert unlocked: %p (at %s %d)\n", map, file, line));
+ if ((map->flags & VM_MAP_INTRSAFE) == 0)
+ rw_assert_unlocked(&map->lock);
+ else
+ MUTEX_ASSERT_UNLOCKED(&map->mtx);
}
#ifndef SMALL_KERNEL
Index: uvm_map.h
===================================================================
RCS file: /cvs/src/sys/uvm/uvm_map.h,v
retrieving revision 1.71
diff -u -p -r1.71 uvm_map.h
--- uvm_map.h 15 Dec 2021 12:53:53 -0000 1.71
+++ uvm_map.h 27 Jan 2022 10:19:56 -0000
@@ -262,6 +262,7 @@ RBT_PROTOTYPE(uvm_map_addr, vm_map_entry
* a atomic operations
* I immutable after creation or exec(2)
* v `vm_map_lock' (this map `lock' or `mtx')
+ * f `flags_lock'
*/
struct vm_map {
struct pmap *pmap; /* [I] Physical map */
@@ -272,9 +273,9 @@ struct vm_map {
struct uvm_map_addr addr; /* [v] Entry tree, by addr */
- vsize_t size; /* virtual size */
+ vsize_t size; /* [v] virtual size */
int ref_count; /* [a] Reference count */
- int flags; /* flags */
+ int flags; /* [f] flags */
struct mutex flags_lock; /* flags lock */
unsigned int timestamp; /* Version number */
@@ -419,6 +420,10 @@ void vm_map_downgrade_ln(struct vm_map*
void vm_map_upgrade_ln(struct vm_map*, char*, int);
void vm_map_busy_ln(struct vm_map*, char*, int);
void vm_map_unbusy_ln(struct vm_map*, char*, int);
+void vm_map_assert_locked_any_ln(struct vm_map*, char*, int);
+void vm_map_assert_locked_read_ln(struct vm_map*, char*, int);
+void vm_map_assert_locked_write_ln(struct vm_map*, char*, int);
+void vm_map_assert_unlocked_ln(struct vm_map*, char*, int);
#ifdef DIAGNOSTIC
#define vm_map_lock_try(map) vm_map_lock_try_ln(map, __FILE__, __LINE__)
@@ -430,6 +435,10 @@ void vm_map_unbusy_ln(struct vm_map*, c
#define vm_map_upgrade(map) vm_map_upgrade_ln(map, __FILE__, __LINE__)
#define vm_map_busy(map) vm_map_busy_ln(map, __FILE__, __LINE__)
#define vm_map_unbusy(map) vm_map_unbusy_ln(map, __FILE__, __LINE__)
+#define vm_map_assert_locked_any(map) vm_map_assert_locked_any_ln(map,
__FILE__, __LINE__)
+#define vm_map_assert_locked_read(map) vm_map_assert_locked_read_ln(map,
__FILE__, __LINE__)
+#define vm_map_assert_locked_write(map)
vm_map_assert_locked_write_ln(map, __FILE__, __LINE__)
+#define vm_map_assert_unlocked(map) vm_map_assert_unlocked_ln(map,
__FILE__, __LINE__)
#else
#define vm_map_lock_try(map) vm_map_lock_try_ln(map, NULL, 0)
#define vm_map_lock(map) vm_map_lock_ln(map, NULL, 0)
@@ -440,6 +449,10 @@ void vm_map_unbusy_ln(struct vm_map*, c
#define vm_map_upgrade(map) vm_map_upgrade_ln(map, NULL, 0)
#define vm_map_busy(map) vm_map_busy_ln(map, NULL, 0)
#define vm_map_unbusy(map) vm_map_unbusy_ln(map, NULL, 0)
+#define vm_map_assert_locked_any(map) vm_map_assert_locked_any_ln(map, NULL,
0)
+#define vm_map_assert_locked_read(map) vm_map_assert_locked_read_ln(map, NULL,
0)
+#define vm_map_assert_locked_write(map)
vm_map_assert_locked_write_ln(map, NULL, 0)
+#define vm_map_assert_unlocked(map) vm_map_assert_unlocked_ln(map, NULL, 0)
#endif
void uvm_map_lock_entry(struct vm_map_entry *);
Index: uvm_fault.c
===================================================================
RCS file: /cvs/src/sys/uvm/uvm_fault.c,v
retrieving revision 1.124
diff -u -p -r1.124 uvm_fault.c
--- uvm_fault.c 28 Dec 2021 13:16:28 -0000 1.124
+++ uvm_fault.c 26 Jan 2022 16:23:42 -0000
@@ -1602,6 +1602,7 @@ uvm_fault_unwire_locked(vm_map_t map, va
struct vm_page *pg;
KASSERT((map->flags & VM_MAP_INTRSAFE) == 0);
+ vm_map_assert_locked_any(map);
/*
* we assume that the area we are unwiring has actually been wired